applied-ai-018 committed
Commit 3cc28fd · verified · 1 Parent(s): 2c059b9

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/bart/__init__.py +148 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/bart/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/bart/__pycache__/configuration_bart.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/bart/__pycache__/convert_bart_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/bart/__pycache__/modeling_bart.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/bart/__pycache__/modeling_flax_bart.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/bart/__pycache__/modeling_tf_bart.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/bart/__pycache__/tokenization_bart.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/bart/__pycache__/tokenization_bart_fast.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/bart/configuration_bart.py +401 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py +157 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/bart/modeling_bart.py +0 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/bart/modeling_flax_bart.py +1995 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/bart/modeling_tf_bart.py +1712 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/bart/tokenization_bart.py +390 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/bart/tokenization_bart_fast.py +276 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__init__.py +197 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/configuration_bert.py +153 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py +245 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py +63 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/modeling_bert.py +1867 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/modeling_flax_bert.py +1713 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/modeling_tf_bert.py +2114 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/tokenization_bert_fast.py +172 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/tokenization_bert_tf.py +254 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/byt5/__init__.py +28 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/__init__.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/convert_byt5_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/tokenization_byt5.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/byt5/convert_byt5_original_tf_checkpoint_to_pytorch.py +60 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/byt5/tokenization_byt5.py +234 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/__init__.py +81 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/__init__.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/configuration_cvt.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/convert_cvt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/modeling_cvt.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/modeling_tf_cvt.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/configuration_cvt.py +146 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py +362 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/modeling_cvt.py +725 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/modeling_tf_cvt.py +1097 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/phi/__init__.py +69 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/phi/__pycache__/__init__.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/phi/__pycache__/configuration_phi.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/phi/__pycache__/convert_phi_weights_to_hf.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/phi/__pycache__/modeling_phi.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/phi/configuration_phi.py +191 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/phi/convert_phi_weights_to_hf.py +207 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/phi/modeling_phi.py +1489 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__init__.py +60 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/bart/__init__.py ADDED
@@ -0,0 +1,148 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_bart": ["BART_PRETRAINED_CONFIG_ARCHIVE_MAP", "BartConfig", "BartOnnxConfig"],
    "tokenization_bart": ["BartTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bart_fast"] = ["BartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bart"] = [
        "BART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BartForCausalLM",
        "BartForConditionalGeneration",
        "BartForQuestionAnswering",
        "BartForSequenceClassification",
        "BartModel",
        "BartPreTrainedModel",
        "BartPretrainedModel",
        "PretrainedBartModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_bart"] = [
        "TFBartForConditionalGeneration",
        "TFBartForSequenceClassification",
        "TFBartModel",
        "TFBartPretrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_bart"] = [
        "FlaxBartDecoderPreTrainedModel",
        "FlaxBartForCausalLM",
        "FlaxBartForConditionalGeneration",
        "FlaxBartForQuestionAnswering",
        "FlaxBartForSequenceClassification",
        "FlaxBartModel",
        "FlaxBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bart import BART_PRETRAINED_CONFIG_ARCHIVE_MAP, BartConfig, BartOnnxConfig
    from .tokenization_bart import BartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bart_fast import BartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bart import (
            BART_PRETRAINED_MODEL_ARCHIVE_LIST,
            BartForCausalLM,
            BartForConditionalGeneration,
            BartForQuestionAnswering,
            BartForSequenceClassification,
            BartModel,
            BartPreTrainedModel,
            BartPretrainedModel,
            PretrainedBartModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_bart import (
            TFBartForConditionalGeneration,
            TFBartForSequenceClassification,
            TFBartModel,
            TFBartPretrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_bart import (
            FlaxBartDecoderPreTrainedModel,
            FlaxBartForCausalLM,
            FlaxBartForConditionalGeneration,
            FlaxBartForQuestionAnswering,
            FlaxBartForSequenceClassification,
            FlaxBartModel,
            FlaxBartPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
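
The `_LazyModule` registration above keeps `import transformers.models.bart` cheap: the heavy backends (PyTorch, TensorFlow, Flax) are only imported when one of the registered names is actually accessed. A minimal usage sketch of that behavior (assuming a standard transformers install with PyTorch available; the variable names below are illustrative):

# Sketch: lazy attribute access into the bart subpackage.
from transformers.models import bart

# At this point modeling_bart has not been imported; the package object is a _LazyModule.
config_cls = bart.BartConfig                    # triggers import of configuration_bart only
model_cls = bart.BartForConditionalGeneration   # triggers import of modeling_bart (requires torch)

model = model_cls(config_cls())                 # randomly initialized, facebook/bart-large-sized
print(type(model).__name__)                     # "BartForConditionalGeneration"
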
llmeval-env/lib/python3.10/site-packages/transformers/models/bart/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.13 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bart/__pycache__/configuration_bart.cpython-310.pyc ADDED
Binary file (12.6 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bart/__pycache__/convert_bart_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (4.51 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bart/__pycache__/modeling_bart.cpython-310.pyc ADDED
Binary file (69.9 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bart/__pycache__/modeling_flax_bart.cpython-310.pyc ADDED
Binary file (53.6 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bart/__pycache__/modeling_tf_bart.cpython-310.pyc ADDED
Binary file (54.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bart/__pycache__/tokenization_bart.cpython-310.pyc ADDED
Binary file (15.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bart/__pycache__/tokenization_bart_fast.cpython-310.pyc ADDED
Binary file (9.44 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bart/configuration_bart.py ADDED
@@ -0,0 +1,401 @@
# coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BART model configuration"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)


class BartConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`BartModel`]. It is used to instantiate a BART
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the BART
    [facebook/bart-large](https://huggingface.co/facebook/bart-large) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 50265):
            Vocabulary size of the BART model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`BartModel`] or [`TFBartModel`].
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        classifier_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for classifier.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by dividing by sqrt(d_model).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        num_labels (`int`, *optional*, defaults to 3):
            The number of labels to use in [`BartForSequenceClassification`].
        forced_eos_token_id (`int`, *optional*, defaults to 2):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.

    Example:

    ```python
    >>> from transformers import BartConfig, BartModel

    >>> # Initializing a BART facebook/bart-large style configuration
    >>> configuration = BartConfig()

    >>> # Initializing a model (with random weights) from the facebook/bart-large style configuration
    >>> model = BartModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )


class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
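
To make the `BartOnnxConfig` dummy-input machinery above concrete, a small usage sketch follows. It assumes the public `transformers` API for `BartConfig`, `BartTokenizer`, and `TensorType` (export paths can vary slightly across versions), and exercises the `generate_dummy_inputs` path defined in this file:

# Illustrative only: build dummy ONNX export inputs for the "default" (seq2seq) task.
from transformers import BartConfig, BartTokenizer
from transformers.models.bart.configuration_bart import BartOnnxConfig
from transformers.utils import TensorType

config = BartConfig()  # facebook/bart-large-sized defaults (12+12 layers, d_model=1024)
onnx_config = BartOnnxConfig(config, task="default")

tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")
dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)

# Dynamic axes (-1) fall back to fixed sizes (2 samples, 8 tokens), as handled by
# _generate_dummy_inputs_for_sequence_classification_and_question_answering above.
print(sorted(dummy.keys()))  # attention_mask, decoder_attention_mask, decoder_input_ids, input_ids
print(onnx_config.inputs)    # the dynamic-axis mapping from the `inputs` property
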
llmeval-env/lib/python3.10/site-packages/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,157 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BART checkpoint."""


import argparse
import os
from pathlib import Path

import fairseq
import torch
from packaging import version
from torch import nn

from transformers import (
    BartConfig,
    BartForConditionalGeneration,
    BartForSequenceClassification,
    BartModel,
    BartTokenizer,
)
from transformers.utils import logging


FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak model's weights to our BART structure.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
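
For orientation, a short sketch of how the conversion entry point above is typically invoked from Python. The checkpoint name, output folder, and HF config name below are placeholders, not values taken from this commit; the call mirrors the CLI arguments registered in the `__main__` block and requires fairseq >= 0.9.0 plus torch, as checked at module import time:

# Roughly equivalent to:
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py bart.large ./bart-large-hf --hf_config facebook/bart-large
from transformers.models.bart.convert_bart_original_pytorch_checkpoint_to_pytorch import (
    convert_bart_checkpoint,
)

convert_bart_checkpoint(
    "bart.large",                               # a fairseq torch.hub name, or a local path ending in model.pt
    "./bart-large-hf",                          # output folder passed to save_pretrained()
    hf_checkpoint_name="facebook/bart-large",   # HF config/tokenizer used for verification
)
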
llmeval-env/lib/python3.10/site-packages/transformers/models/bart/modeling_bart.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bart/modeling_flax_bart.py ADDED
@@ -0,0 +1,1995 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Fairseq Authors and The Google Flax Team Authors And The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Flax Bart model."""
16
+
17
+ import math
18
+ import random
19
+ from functools import partial
20
+ from typing import Callable, Optional, Tuple
21
+
22
+ import flax.linen as nn
23
+ import jax
24
+ import jax.numpy as jnp
25
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
26
+ from flax.linen import combine_masks, make_causal_mask
27
+ from flax.linen.attention import dot_product_attention_weights
28
+ from flax.traverse_util import flatten_dict, unflatten_dict
29
+ from jax import lax
30
+ from jax.random import PRNGKey
31
+
32
+ from ...modeling_flax_outputs import (
33
+ FlaxBaseModelOutput,
34
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
35
+ FlaxCausalLMOutputWithCrossAttentions,
36
+ FlaxSeq2SeqLMOutput,
37
+ FlaxSeq2SeqModelOutput,
38
+ FlaxSeq2SeqQuestionAnsweringModelOutput,
39
+ FlaxSeq2SeqSequenceClassifierOutput,
40
+ )
41
+ from ...modeling_flax_utils import (
42
+ ACT2FN,
43
+ FlaxPreTrainedModel,
44
+ append_call_sample_docstring,
45
+ append_replace_return_docstrings,
46
+ overwrite_call_docstring,
47
+ )
48
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
49
+ from .configuration_bart import BartConfig
50
+
51
+
52
+ logger = logging.get_logger(__name__)
53
+
54
+ _CHECKPOINT_FOR_DOC = "facebook/bart-base"
55
+ _CONFIG_FOR_DOC = "BartConfig"
56
+
57
+
58
+ BART_START_DOCSTRING = r"""
59
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
60
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
61
+ etc.)
62
+
63
+ This model is also a Flax Linen
64
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
65
+ regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
66
+
67
+ Finally, this model supports inherent JAX features such as:
68
+
69
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
70
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
71
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
72
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
73
+
74
+ Parameters:
75
+ config ([`BartConfig`]): Model configuration class with all the parameters of the model.
76
+ Initializing with a config file does not load the weights associated with the model, only the
77
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
78
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
79
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
80
+ `jax.numpy.bfloat16` (on TPUs).
81
+
82
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
83
+ specified all the computation will be performed with the given `dtype`.
84
+
85
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
86
+ parameters.**
87
+
88
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
89
+ [`~FlaxPreTrainedModel.to_bf16`].
90
+ """
91
+
92
+ BART_INPUTS_DOCSTRING = r"""
93
+ Args:
94
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
95
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
96
+ it.
97
+
98
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
99
+ [`PreTrainedTokenizer.__call__`] for details.
100
+
101
+ [What are input IDs?](../glossary#input-ids)
102
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
103
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
104
+
105
+ - 1 for tokens that are **not masked**,
106
+ - 0 for tokens that are **masked**.
107
+
108
+ [What are attention masks?](../glossary#attention-mask)
109
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
110
+ Indices of decoder input sequence tokens in the vocabulary.
111
+
112
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
113
+ [`PreTrainedTokenizer.__call__`] for details.
114
+
115
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
116
+
117
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
118
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
119
+ for denoising pre-training following the paper.
120
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
121
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
122
+ be used by default.
123
+
124
+ If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the
125
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
126
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
127
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
128
+ config.max_position_embeddings - 1]`.
129
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
130
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
131
+ range `[0, config.max_position_embeddings - 1]`.
132
+ output_attentions (`bool`, *optional*):
133
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
134
+ tensors for more detail.
135
+ output_hidden_states (`bool`, *optional*):
136
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
137
+ more detail.
138
+ return_dict (`bool`, *optional*):
139
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
140
+ """
141
+
142
+
143
+ BART_ENCODE_INPUTS_DOCSTRING = r"""
144
+ Args:
145
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
146
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
147
+ it.
148
+
149
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
150
+ [`PreTrainedTokenizer.__call__`] for details.
151
+
152
+ [What are input IDs?](../glossary#input-ids)
153
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
154
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
155
+
156
+ - 1 for tokens that are **not masked**,
157
+ - 0 for tokens that are **masked**.
158
+
159
+ [What are attention masks?](../glossary#attention-mask)
160
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
161
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
162
+ config.max_position_embeddings - 1]`.
163
+ output_attentions (`bool`, *optional*):
164
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
165
+ tensors for more detail.
166
+ output_hidden_states (`bool`, *optional*):
167
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
168
+ more detail.
169
+ return_dict (`bool`, *optional*):
170
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
171
+ """
172
+
173
+ BART_DECODE_INPUTS_DOCSTRING = r"""
174
+ Args:
175
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):
176
+ Indices of decoder input sequence tokens in the vocabulary.
177
+
178
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
179
+ [`PreTrainedTokenizer.__call__`] for details.
180
+
181
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
182
+
183
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
184
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
185
+ for denoising pre-training following the paper.
186
+ encoder_outputs (`tuple(tuple(jnp.ndarray)`):
187
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
188
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
189
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
190
+ encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
191
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
192
+
193
+ - 1 for tokens that are **not masked**,
194
+ - 0 for tokens that are **masked**.
195
+
196
+ [What are attention masks?](../glossary#attention-mask)
197
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
198
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
199
+ be used by default.
200
+
201
+ If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the
202
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
203
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
204
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
205
+ range `[0, config.max_position_embeddings - 1]`.
206
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
207
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
208
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
209
+ output_attentions (`bool`, *optional*):
210
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
211
+ tensors for more detail.
212
+ output_hidden_states (`bool`, *optional*):
213
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
214
+ more detail.
215
+ return_dict (`bool`, *optional*):
216
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
217
+ """
218
+
219
+
220
+ def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
221
+ """
222
+ Shift input ids one token to the right.
223
+ """
224
+ shifted_input_ids = jnp.zeros_like(input_ids)
225
+ shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
226
+ shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
227
+
228
+ shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
229
+ return shifted_input_ids
230
+
231
+
232
+ class FlaxBartAttention(nn.Module):
233
+ config: BartConfig
234
+ embed_dim: int
235
+ num_heads: int
236
+ dropout: float = 0.0
237
+ causal: bool = False
238
+ bias: bool = True
239
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
240
+
241
+ def setup(self) -> None:
242
+ self.head_dim = self.embed_dim // self.num_heads
243
+ if self.head_dim * self.num_heads != self.embed_dim:
244
+ raise ValueError(
245
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
246
+ f" and `num_heads`: {self.num_heads})."
247
+ )
248
+
249
+ dense = partial(
250
+ nn.Dense,
251
+ self.embed_dim,
252
+ use_bias=self.bias,
253
+ dtype=self.dtype,
254
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
255
+ )
256
+
257
+ self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
258
+ self.out_proj = dense()
259
+
260
+ self.dropout_layer = nn.Dropout(rate=self.dropout)
261
+
262
+ if self.causal:
263
+ self.causal_mask = make_causal_mask(
264
+ jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
265
+ )
266
+
267
+ def _split_heads(self, hidden_states):
268
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
269
+
270
+ def _merge_heads(self, hidden_states):
271
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
272
+
273
+ @nn.compact
274
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
275
+ """
276
+ This function takes projected key, value states from a single input token and concatenates the states to cached
277
+ states from previous steps. This function is slighly adapted from the official Flax repository:
278
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
279
+ """
280
+ # detect if we're initializing by absence of existing cache data.
281
+ is_initialized = self.has_variable("cache", "cached_key")
282
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
283
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
284
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
285
+
286
+ if is_initialized:
287
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
288
+ # update key, value caches with our new 1d spatial slices
289
+ cur_index = cache_index.value
290
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
291
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
292
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
293
+ cached_key.value = key
294
+ cached_value.value = value
295
+ num_updated_cache_vectors = query.shape[1]
296
+ cache_index.value = cache_index.value + num_updated_cache_vectors
297
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
298
+ pad_mask = jnp.broadcast_to(
299
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
300
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
301
+ )
302
+ attention_mask = combine_masks(pad_mask, attention_mask)
303
+ return key, value, attention_mask
304
+
305
+ def __call__(
306
+ self,
307
+ hidden_states: jnp.ndarray,
308
+ key_value_states: Optional[jnp.ndarray] = None,
309
+ attention_mask: Optional[jnp.ndarray] = None,
310
+ init_cache: bool = False,
311
+ deterministic: bool = True,
312
+ ) -> Tuple[jnp.ndarray]:
313
+ """Input shape: Batch x Time x Channel"""
314
+
315
+ # if key_value_states are provided this layer is used as a cross-attention layer
316
+ # for the decoder
317
+ is_cross_attention = key_value_states is not None
318
+ batch_size = hidden_states.shape[0]
319
+
320
+ # get query proj
321
+ query_states = self.q_proj(hidden_states)
322
+ # get key, value proj
323
+ if is_cross_attention:
324
+ # cross_attentions
325
+ key_states = self.k_proj(key_value_states)
326
+ value_states = self.v_proj(key_value_states)
327
+ else:
328
+ # self_attention
329
+ key_states = self.k_proj(hidden_states)
330
+ value_states = self.v_proj(hidden_states)
331
+
332
+ query_states = self._split_heads(query_states)
333
+ key_states = self._split_heads(key_states)
334
+ value_states = self._split_heads(value_states)
335
+
336
+ # handle cache prepare causal attention mask
337
+ if self.causal:
338
+ query_length, key_length = query_states.shape[1], key_states.shape[1]
339
+ if self.has_variable("cache", "cached_key"):
340
+ mask_shift = self.variables["cache"]["cache_index"]
341
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
342
+ causal_mask = lax.dynamic_slice(
343
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
344
+ )
345
+ else:
346
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
347
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
348
+
349
+ # combine masks if needed
350
+ if attention_mask is not None and self.causal:
351
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
352
+ attention_mask = combine_masks(attention_mask, causal_mask)
353
+ elif self.causal:
354
+ attention_mask = causal_mask
355
+ elif attention_mask is not None:
356
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
357
+
358
+ # During fast autoregressive decoding, we feed one position at a time,
359
+ # and cache the keys and values step by step.
360
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
361
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
362
+ key_states, value_states, query_states, attention_mask
363
+ )
364
+
365
+ # Convert the boolean attention mask to an attention bias.
366
+ if attention_mask is not None:
367
+ # attention mask in the form of attention bias
368
+ attention_bias = lax.select(
369
+ attention_mask > 0,
370
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
371
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
372
+ )
373
+ else:
374
+ attention_bias = None
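+ # The boolean mask becomes an additive bias: 0.0 where attention is allowed and the dtype's most
+ # negative finite value where it is masked, so masked positions contribute ~0 after the softmax.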
375
+
376
+ dropout_rng = None
377
+ if not deterministic and self.dropout > 0.0:
378
+ dropout_rng = self.make_rng("dropout")
379
+
380
+ attn_weights = dot_product_attention_weights(
381
+ query_states,
382
+ key_states,
383
+ bias=attention_bias,
384
+ dropout_rng=dropout_rng,
385
+ dropout_rate=self.dropout,
386
+ broadcast_dropout=True,
387
+ deterministic=deterministic,
388
+ dtype=self.dtype,
389
+ precision=None,
390
+ )
391
+
392
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
393
+ attn_output = self._merge_heads(attn_output)
394
+ attn_output = self.out_proj(attn_output)
395
+
396
+ return attn_output, attn_weights
397
+
398
+
399
+ class FlaxBartEncoderLayer(nn.Module):
400
+ config: BartConfig
401
+ dtype: jnp.dtype = jnp.float32
402
+
403
+ def setup(self) -> None:
404
+ self.embed_dim = self.config.d_model
405
+ self.self_attn = FlaxBartAttention(
406
+ config=self.config,
407
+ embed_dim=self.embed_dim,
408
+ num_heads=self.config.encoder_attention_heads,
409
+ dropout=self.config.attention_dropout,
410
+ dtype=self.dtype,
411
+ )
412
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
413
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
414
+ self.activation_fn = ACT2FN[self.config.activation_function]
415
+ self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
416
+ self.fc1 = nn.Dense(
417
+ self.config.encoder_ffn_dim,
418
+ dtype=self.dtype,
419
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
420
+ )
421
+ self.fc2 = nn.Dense(
422
+ self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
423
+ )
424
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
425
+
426
+ def __call__(
427
+ self,
428
+ hidden_states: jnp.ndarray,
429
+ attention_mask: jnp.ndarray,
430
+ output_attentions: bool = True,
431
+ deterministic: bool = True,
432
+ ) -> Tuple[jnp.ndarray]:
433
+ residual = hidden_states
434
+ hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask)
435
+
436
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
437
+ hidden_states = residual + hidden_states
438
+ hidden_states = self.self_attn_layer_norm(hidden_states)
439
+
440
+ residual = hidden_states
441
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
442
+ hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
443
+ hidden_states = self.fc2(hidden_states)
444
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
445
+ hidden_states = residual + hidden_states
446
+ hidden_states = self.final_layer_norm(hidden_states)
447
+
448
+ outputs = (hidden_states,)
449
+
450
+ if output_attentions:
451
+ outputs += (attn_weights,)
452
+
453
+ return outputs
454
+
455
+
456
+ class FlaxBartEncoderLayerCollection(nn.Module):
457
+ config: BartConfig
458
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
459
+
460
+ def setup(self):
461
+ self.layers = [
462
+ FlaxBartEncoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.encoder_layers)
463
+ ]
464
+ self.layerdrop = self.config.encoder_layerdrop
465
+
466
+ def __call__(
467
+ self,
468
+ hidden_states,
469
+ attention_mask,
470
+ deterministic: bool = True,
471
+ output_attentions: bool = False,
472
+ output_hidden_states: bool = False,
473
+ return_dict: bool = True,
474
+ ):
475
+ all_attentions = () if output_attentions else None
476
+ all_hidden_states = () if output_hidden_states else None
477
+
478
+ for encoder_layer in self.layers:
479
+ if output_hidden_states:
480
+ all_hidden_states = all_hidden_states + (hidden_states,)
481
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
482
+ dropout_probability = random.uniform(0, 1)
483
+ if not deterministic and (dropout_probability < self.layerdrop): # skip the layer
484
+ layer_outputs = (None, None)
485
+ else:
486
+ layer_outputs = encoder_layer(
487
+ hidden_states,
488
+ attention_mask,
489
+ output_attentions,
490
+ deterministic,
491
+ )
492
+ hidden_states = layer_outputs[0]
493
+ if output_attentions:
494
+ all_attentions = all_attentions + (layer_outputs[1],)
495
+
496
+ if output_hidden_states:
497
+ all_hidden_states += (hidden_states,)
498
+
499
+ outputs = (hidden_states, all_hidden_states, all_attentions)
500
+
501
+ if not return_dict:
502
+ return tuple(v for v in outputs if v is not None)
503
+
504
+ return FlaxBaseModelOutput(
505
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
506
+ )
507
+
508
+
509
+ class FlaxBartDecoderLayer(nn.Module):
510
+ config: BartConfig
511
+ dtype: jnp.dtype = jnp.float32
512
+
513
+ def setup(self) -> None:
514
+ self.embed_dim = self.config.d_model
515
+ self.self_attn = FlaxBartAttention(
516
+ config=self.config,
517
+ embed_dim=self.embed_dim,
518
+ num_heads=self.config.decoder_attention_heads,
519
+ dropout=self.config.attention_dropout,
520
+ causal=True,
521
+ dtype=self.dtype,
522
+ )
523
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
524
+ self.activation_fn = ACT2FN[self.config.activation_function]
525
+ self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
526
+
527
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
528
+ self.encoder_attn = FlaxBartAttention(
529
+ config=self.config,
530
+ embed_dim=self.embed_dim,
531
+ num_heads=self.config.decoder_attention_heads,
532
+ dropout=self.config.attention_dropout,
533
+ dtype=self.dtype,
534
+ )
535
+ self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
536
+ self.fc1 = nn.Dense(
537
+ self.config.decoder_ffn_dim,
538
+ dtype=self.dtype,
539
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
540
+ )
541
+ self.fc2 = nn.Dense(
542
+ self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
543
+ )
544
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
545
+
546
+ def __call__(
547
+ self,
548
+ hidden_states: jnp.ndarray,
549
+ attention_mask: jnp.ndarray,
550
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
551
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
552
+ init_cache: bool = False,
553
+ output_attentions: bool = True,
554
+ deterministic: bool = True,
555
+ ) -> Tuple[jnp.ndarray]:
556
+ residual = hidden_states
557
+
558
+ # Self Attention
559
+ hidden_states, self_attn_weights = self.self_attn(
560
+ hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache
561
+ )
562
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
563
+ hidden_states = residual + hidden_states
564
+ hidden_states = self.self_attn_layer_norm(hidden_states)
565
+
566
+ # Cross-Attention Block
567
+ cross_attn_weights = None
568
+ if encoder_hidden_states is not None:
569
+ residual = hidden_states
570
+
571
+ hidden_states, cross_attn_weights = self.encoder_attn(
572
+ hidden_states=hidden_states,
573
+ key_value_states=encoder_hidden_states,
574
+ attention_mask=encoder_attention_mask,
575
+ )
576
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
577
+ hidden_states = residual + hidden_states
578
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
579
+
580
+ # Fully Connected
581
+ residual = hidden_states
582
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
583
+ hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
584
+ hidden_states = self.fc2(hidden_states)
585
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
586
+ hidden_states = residual + hidden_states
587
+ hidden_states = self.final_layer_norm(hidden_states)
588
+
589
+ outputs = (hidden_states,)
590
+
591
+ if output_attentions:
592
+ outputs += (self_attn_weights, cross_attn_weights)
593
+
594
+ return outputs
595
+
596
+
597
+ class FlaxBartDecoderLayerCollection(nn.Module):
598
+ config: BartConfig
599
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
600
+
601
+ def setup(self):
602
+ self.layers = [
603
+ FlaxBartDecoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.decoder_layers)
604
+ ]
605
+ self.layerdrop = self.config.decoder_layerdrop
606
+
607
+ def __call__(
608
+ self,
609
+ hidden_states,
610
+ attention_mask,
611
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
612
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
613
+ deterministic: bool = True,
614
+ init_cache: bool = False,
615
+ output_attentions: bool = False,
616
+ output_hidden_states: bool = False,
617
+ return_dict: bool = True,
618
+ ):
619
+ # decoder layers
620
+ all_hidden_states = () if output_hidden_states else None
621
+ all_self_attns = () if output_attentions else None
622
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
623
+
624
+ for decoder_layer in self.layers:
625
+ if output_hidden_states:
626
+ all_hidden_states += (hidden_states,)
627
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
628
+ dropout_probability = random.uniform(0, 1)
629
+ if not deterministic and (dropout_probability < self.layerdrop):
630
+ layer_outputs = (None, None, None)
631
+ else:
632
+ layer_outputs = decoder_layer(
633
+ hidden_states,
634
+ attention_mask=attention_mask,
635
+ encoder_hidden_states=encoder_hidden_states,
636
+ encoder_attention_mask=encoder_attention_mask,
637
+ init_cache=init_cache,
638
+ output_attentions=output_attentions,
639
+ deterministic=deterministic,
640
+ )
641
+
642
+ hidden_states = layer_outputs[0]
643
+ if output_attentions:
644
+ all_self_attns += (layer_outputs[1],)
645
+
646
+ if encoder_hidden_states is not None:
647
+ all_cross_attentions += (layer_outputs[2],)
648
+
649
+ # add hidden states from the last decoder layer
650
+ if output_hidden_states:
651
+ all_hidden_states += (hidden_states,)
652
+
653
+ outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions]
654
+
655
+ if not return_dict:
656
+ return tuple(v for v in outputs if v is not None)
657
+
658
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
659
+ last_hidden_state=hidden_states,
660
+ hidden_states=all_hidden_states,
661
+ attentions=all_self_attns,
662
+ cross_attentions=all_cross_attentions,
663
+ )
664
+
665
+
666
+ class FlaxBartClassificationHead(nn.Module):
667
+ """Head for sentence-level classification tasks."""
668
+
669
+ config: BartConfig
670
+ inner_dim: int
671
+ num_classes: int
672
+ pooler_dropout: float
673
+ dtype: jnp.dtype = jnp.float32
674
+
675
+ def setup(self):
676
+ self.dense = nn.Dense(
677
+ self.inner_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
678
+ )
679
+ self.dropout = nn.Dropout(rate=self.pooler_dropout)
680
+ self.out_proj = nn.Dense(
681
+ self.num_classes,
682
+ dtype=self.dtype,
683
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
684
+ )
685
+
686
+ def __call__(self, hidden_states: jnp.ndarray, deterministic: bool):
687
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
688
+ hidden_states = self.dense(hidden_states)
689
+ hidden_states = jnp.tanh(hidden_states)
690
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
691
+ hidden_states = self.out_proj(hidden_states)
692
+ return hidden_states
693
+
694
+
695
+ class FlaxBartEncoder(nn.Module):
696
+ config: BartConfig
697
+ embed_tokens: nn.Embed
698
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
699
+
700
+ def setup(self):
701
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
702
+
703
+ embed_dim = self.config.d_model
704
+ self.padding_idx = self.config.pad_token_id
705
+ self.max_source_positions = self.config.max_position_embeddings
706
+ self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
707
+
708
+ # Bart is set up so that if padding_idx is specified, the embedding ids are offset by 2
709
+ # and num_embeddings is adjusted accordingly. Other models don't have this hack.
710
+ self.offset = 2
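+ # with offset = 2, position id 0 is looked up at embedding index 2 (`position_ids + self.offset` below)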
711
+ self.embed_positions = nn.Embed(
712
+ self.config.max_position_embeddings + self.offset,
713
+ embed_dim,
714
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
715
+ dtype=self.dtype,
716
+ )
717
+ self.layers = FlaxBartEncoderLayerCollection(self.config, self.dtype)
718
+ self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
719
+
720
+ def __call__(
721
+ self,
722
+ input_ids,
723
+ attention_mask,
724
+ position_ids,
725
+ output_attentions: bool = False,
726
+ output_hidden_states: bool = False,
727
+ return_dict: bool = True,
728
+ deterministic: bool = True,
729
+ ):
730
+ input_shape = input_ids.shape
731
+ input_ids = input_ids.reshape(-1, input_shape[-1])
732
+
733
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
734
+
735
+ embed_pos = self.embed_positions(position_ids + self.offset)
736
+
737
+ hidden_states = inputs_embeds + embed_pos
738
+ hidden_states = self.layernorm_embedding(hidden_states)
739
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
740
+
741
+ outputs = self.layers(
742
+ hidden_states,
743
+ attention_mask,
744
+ deterministic=deterministic,
745
+ output_attentions=output_attentions,
746
+ output_hidden_states=output_hidden_states,
747
+ return_dict=return_dict,
748
+ )
749
+
750
+ if not return_dict:
751
+ return outputs
752
+
753
+ return FlaxBaseModelOutput(
754
+ last_hidden_state=outputs.last_hidden_state,
755
+ hidden_states=outputs.hidden_states,
756
+ attentions=outputs.attentions,
757
+ )
758
+
759
+
760
+ class FlaxBartDecoder(nn.Module):
761
+ config: BartConfig
762
+ embed_tokens: nn.Embed
763
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
764
+
765
+ def setup(self):
766
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
767
+
768
+ embed_dim = self.config.d_model
769
+ self.padding_idx = self.config.pad_token_id
770
+ self.max_target_positions = self.config.max_position_embeddings
771
+ self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
772
+
773
+ # Bart is set up so that if padding_idx is specified, the embedding ids are offset by 2
774
+ # and num_embeddings is adjusted accordingly. Other models don't have this hack.
775
+ self.offset = 2
776
+ self.embed_positions = nn.Embed(
777
+ self.config.max_position_embeddings + self.offset,
778
+ embed_dim,
779
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
780
+ dtype=self.dtype,
781
+ )
782
+
783
+ self.layers = FlaxBartDecoderLayerCollection(self.config, self.dtype)
784
+ self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
785
+
786
+ def __call__(
787
+ self,
788
+ input_ids,
789
+ attention_mask,
790
+ position_ids,
791
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
792
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
793
+ init_cache: bool = False,
794
+ output_attentions: bool = False,
795
+ output_hidden_states: bool = False,
796
+ return_dict: bool = True,
797
+ deterministic: bool = True,
798
+ ):
799
+ input_shape = input_ids.shape
800
+ input_ids = input_ids.reshape(-1, input_shape[-1])
801
+
802
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
803
+
804
+ # embed positions
805
+ positions = self.embed_positions(position_ids + self.offset)
806
+
807
+ hidden_states = inputs_embeds + positions
808
+ hidden_states = self.layernorm_embedding(hidden_states)
809
+
810
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
811
+
812
+ outputs = self.layers(
813
+ hidden_states,
814
+ attention_mask,
815
+ encoder_hidden_states,
816
+ encoder_attention_mask,
817
+ deterministic=deterministic,
818
+ init_cache=init_cache,
819
+ output_attentions=output_attentions,
820
+ output_hidden_states=output_hidden_states,
821
+ return_dict=return_dict,
822
+ )
823
+
824
+ if not return_dict:
825
+ return outputs
826
+
827
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
828
+ last_hidden_state=outputs.last_hidden_state,
829
+ hidden_states=outputs.hidden_states,
830
+ attentions=outputs.attentions,
831
+ cross_attentions=outputs.cross_attentions,
832
+ )
833
+
834
+
835
+ class FlaxBartModule(nn.Module):
836
+ config: BartConfig
837
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
838
+
839
+ def setup(self):
840
+ self.shared = nn.Embed(
841
+ self.config.vocab_size,
842
+ self.config.d_model,
843
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
844
+ dtype=self.dtype,
845
+ )
846
+
847
+ self.encoder = FlaxBartEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
848
+ self.decoder = FlaxBartDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
849
+
850
+ def _get_encoder_module(self):
851
+ return self.encoder
852
+
853
+ def _get_decoder_module(self):
854
+ return self.decoder
855
+
856
+ def __call__(
857
+ self,
858
+ input_ids,
859
+ attention_mask,
860
+ decoder_input_ids,
861
+ decoder_attention_mask,
862
+ position_ids,
863
+ decoder_position_ids,
864
+ output_attentions: bool = False,
865
+ output_hidden_states: bool = False,
866
+ return_dict: bool = True,
867
+ deterministic: bool = True,
868
+ ):
869
+ encoder_outputs = self.encoder(
870
+ input_ids=input_ids,
871
+ attention_mask=attention_mask,
872
+ position_ids=position_ids,
873
+ output_attentions=output_attentions,
874
+ output_hidden_states=output_hidden_states,
875
+ return_dict=return_dict,
876
+ deterministic=deterministic,
877
+ )
878
+
879
+ decoder_outputs = self.decoder(
880
+ input_ids=decoder_input_ids,
881
+ attention_mask=decoder_attention_mask,
882
+ position_ids=decoder_position_ids,
883
+ encoder_hidden_states=encoder_outputs[0],
884
+ encoder_attention_mask=attention_mask,
885
+ output_attentions=output_attentions,
886
+ output_hidden_states=output_hidden_states,
887
+ return_dict=return_dict,
888
+ deterministic=deterministic,
889
+ )
890
+
891
+ if not return_dict:
892
+ return decoder_outputs + encoder_outputs
893
+
894
+ return FlaxSeq2SeqModelOutput(
895
+ last_hidden_state=decoder_outputs.last_hidden_state,
896
+ decoder_hidden_states=decoder_outputs.hidden_states,
897
+ decoder_attentions=decoder_outputs.attentions,
898
+ cross_attentions=decoder_outputs.cross_attentions,
899
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
900
+ encoder_hidden_states=encoder_outputs.hidden_states,
901
+ encoder_attentions=encoder_outputs.attentions,
902
+ )
903
+
904
+
905
+ class FlaxBartPreTrainedModel(FlaxPreTrainedModel):
906
+ config_class = BartConfig
907
+ base_model_prefix: str = "model"
908
+ module_class: nn.Module = None
909
+
910
+ def __init__(
911
+ self,
912
+ config: BartConfig,
913
+ input_shape: Tuple[int] = (1, 1),
914
+ seed: int = 0,
915
+ dtype: jnp.dtype = jnp.float32,
916
+ _do_init: bool = True,
917
+ **kwargs,
918
+ ):
919
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
920
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
921
+
922
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
923
+ # init input tensors
924
+ input_ids = jnp.zeros(input_shape, dtype="i4")
925
+ # make sure initialization pass will work for FlaxBartForSequenceClassificationModule
926
+ input_ids = input_ids.at[(..., -1)].set(self.config.eos_token_id)
927
+ attention_mask = jnp.ones_like(input_ids)
928
+ decoder_input_ids = input_ids
929
+ decoder_attention_mask = jnp.ones_like(input_ids)
930
+
931
+ batch_size, sequence_length = input_ids.shape
932
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
933
+ decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
934
+
935
+ params_rng, dropout_rng = jax.random.split(rng)
936
+ rngs = {"params": params_rng, "dropout": dropout_rng}
937
+
938
+ random_params = self.module.init(
939
+ rngs,
940
+ input_ids,
941
+ attention_mask,
942
+ decoder_input_ids,
943
+ decoder_attention_mask,
944
+ position_ids,
945
+ decoder_position_ids,
946
+ )["params"]
947
+
948
+ if params is not None:
949
+ random_params = flatten_dict(unfreeze(random_params))
950
+ params = flatten_dict(unfreeze(params))
951
+ for missing_key in self._missing_keys:
952
+ params[missing_key] = random_params[missing_key]
953
+ self._missing_keys = set()
954
+ return freeze(unflatten_dict(params))
955
+ else:
956
+ return random_params
957
+
958
+ def init_cache(self, batch_size, max_length, encoder_outputs):
959
+ r"""
960
+ Args:
961
+ batch_size (`int`):
962
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
963
+ max_length (`int`):
964
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
965
+ cache.
966
+ encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray))]`):
967
+ `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
968
+ `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`
969
+ is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
970
+ cross-attention of the decoder.
971
+ """
972
+ # init input variables to retrieve cache
973
+ decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
974
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
975
+ decoder_position_ids = jnp.broadcast_to(
976
+ jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
977
+ )
978
+
979
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
980
+ decoder_module = module._get_decoder_module()
981
+ return decoder_module(
982
+ decoder_input_ids,
983
+ decoder_attention_mask,
984
+ decoder_position_ids,
985
+ **kwargs,
986
+ )
987
+
988
+ init_variables = self.module.init(
989
+ jax.random.PRNGKey(0),
990
+ decoder_input_ids=decoder_input_ids,
991
+ decoder_attention_mask=decoder_attention_mask,
992
+ decoder_position_ids=decoder_position_ids,
993
+ encoder_hidden_states=encoder_outputs[0],
994
+ init_cache=True,
995
+ method=_decoder_forward, # we only need to call the decoder to init the cache
996
+ )
997
+ return unfreeze(init_variables["cache"])
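+ # Hypothetical usage sketch during autoregressive generation:
+ #   encoder_outputs = model.encode(**inputs)
+ #   past_key_values = model.init_cache(batch_size, max_length, encoder_outputs)
+ #   outputs = model.decode(decoder_input_ids, encoder_outputs, past_key_values=past_key_values,
+ #                          decoder_position_ids=decoder_position_ids)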
998
+
999
+ @add_start_docstrings(BART_ENCODE_INPUTS_DOCSTRING)
1000
+ @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=BartConfig)
1001
+ def encode(
1002
+ self,
1003
+ input_ids: jnp.ndarray,
1004
+ attention_mask: Optional[jnp.ndarray] = None,
1005
+ position_ids: Optional[jnp.ndarray] = None,
1006
+ output_attentions: Optional[bool] = None,
1007
+ output_hidden_states: Optional[bool] = None,
1008
+ return_dict: Optional[bool] = None,
1009
+ train: bool = False,
1010
+ params: dict = None,
1011
+ dropout_rng: PRNGKey = None,
1012
+ ):
1013
+ r"""
1014
+ Returns:
1015
+
1016
+ Example:
1017
+
1018
+ ```python
1019
+ >>> from transformers import AutoTokenizer, FlaxBartForConditionalGeneration
1020
+
1021
+ >>> model = FlaxBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")
1022
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
1023
+
1024
+ >>> text = "My friends are cool but they eat too many carbs."
1025
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
1026
+ >>> encoder_outputs = model.encode(**inputs)
1027
+ ```"""
1028
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1029
+ output_hidden_states = (
1030
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1031
+ )
1032
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1033
+
1034
+ if attention_mask is None:
1035
+ attention_mask = jnp.ones_like(input_ids)
1036
+ if position_ids is None:
1037
+ batch_size, sequence_length = input_ids.shape
1038
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
1039
+
1040
+ # Handle any PRNG if needed
1041
+ rngs = {}
1042
+ if dropout_rng is not None:
1043
+ rngs["dropout"] = dropout_rng
1044
+
1045
+ def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
1046
+ encode_module = module._get_encoder_module()
1047
+ return encode_module(input_ids, attention_mask, position_ids, **kwargs)
1048
+
1049
+ return self.module.apply(
1050
+ {"params": params or self.params},
1051
+ input_ids=jnp.array(input_ids, dtype="i4"),
1052
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
1053
+ position_ids=jnp.array(position_ids, dtype="i4"),
1054
+ output_attentions=output_attentions,
1055
+ output_hidden_states=output_hidden_states,
1056
+ return_dict=return_dict,
1057
+ deterministic=not train,
1058
+ rngs=rngs,
1059
+ method=_encoder_forward,
1060
+ )
1061
+
1062
+ @add_start_docstrings(BART_DECODE_INPUTS_DOCSTRING)
1063
+ @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=BartConfig)
1064
+ def decode(
1065
+ self,
1066
+ decoder_input_ids,
1067
+ encoder_outputs,
1068
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1069
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
1070
+ decoder_position_ids: Optional[jnp.ndarray] = None,
1071
+ past_key_values: dict = None,
1072
+ output_attentions: Optional[bool] = None,
1073
+ output_hidden_states: Optional[bool] = None,
1074
+ return_dict: Optional[bool] = None,
1075
+ train: bool = False,
1076
+ params: dict = None,
1077
+ dropout_rng: PRNGKey = None,
1078
+ ):
1079
+ r"""
1080
+ Returns:
1081
+
1082
+ Example:
1083
+
1084
+ ```python
1085
+ >>> import jax.numpy as jnp
1086
+ >>> from transformers import AutoTokenizer, FlaxBartForConditionalGeneration
1087
+
1088
+ >>> model = FlaxBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")
1089
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
1090
+
1091
+ >>> text = "My friends are cool but they eat too many carbs."
1092
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
1093
+ >>> encoder_outputs = model.encode(**inputs)
1094
+
1095
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
1096
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
1097
+
1098
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
1099
+ >>> last_decoder_hidden_states = outputs.last_hidden_state
1100
+ ```"""
1101
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1102
+ output_hidden_states = (
1103
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1104
+ )
1105
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1106
+
1107
+ encoder_hidden_states = encoder_outputs[0]
1108
+ if encoder_attention_mask is None:
1109
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
1110
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
1111
+
1112
+ batch_size, sequence_length = decoder_input_ids.shape
1113
+ if decoder_attention_mask is None:
1114
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
1115
+
1116
+ if decoder_position_ids is None:
1117
+ if past_key_values is not None:
1118
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
1119
+
1120
+ decoder_position_ids = jnp.broadcast_to(
1121
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
1122
+ )
1123
+
1124
+ # Handle any PRNG if needed
1125
+ rngs = {}
1126
+ if dropout_rng is not None:
1127
+ rngs["dropout"] = dropout_rng
1128
+
1129
+ inputs = {"params": params or self.params}
1130
+
1131
+ # If past_key_values are passed, the cache is already initialized and the private flag init_cache has to
1132
+ # be passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can
1133
+ # be updated by the FlaxBartAttention module.
1134
+ if past_key_values:
1135
+ inputs["cache"] = past_key_values
1136
+ mutable = ["cache"]
1137
+ else:
1138
+ mutable = False
1139
+
1140
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
1141
+ decoder_module = module._get_decoder_module()
1142
+ return decoder_module(
1143
+ decoder_input_ids,
1144
+ decoder_attention_mask,
1145
+ decoder_position_ids,
1146
+ **kwargs,
1147
+ )
1148
+
1149
+ outputs = self.module.apply(
1150
+ inputs,
1151
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
1152
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
1153
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
1154
+ encoder_hidden_states=encoder_hidden_states,
1155
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
1156
+ output_attentions=output_attentions,
1157
+ output_hidden_states=output_hidden_states,
1158
+ return_dict=return_dict,
1159
+ deterministic=not train,
1160
+ rngs=rngs,
1161
+ mutable=mutable,
1162
+ method=_decoder_forward,
1163
+ )
1164
+
1165
+ # add updated cache to model output
1166
+ if past_key_values is not None and return_dict:
1167
+ outputs, past = outputs
1168
+ outputs["past_key_values"] = unfreeze(past["cache"])
1169
+ return outputs
1170
+ elif past_key_values is not None and not return_dict:
1171
+ outputs, past = outputs
1172
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
1173
+
1174
+ return outputs
1175
+
1176
+ @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)
1177
+ def __call__(
1178
+ self,
1179
+ input_ids: jnp.ndarray,
1180
+ attention_mask: Optional[jnp.ndarray] = None,
1181
+ decoder_input_ids: Optional[jnp.ndarray] = None,
1182
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
1183
+ position_ids: Optional[jnp.ndarray] = None,
1184
+ decoder_position_ids: Optional[jnp.ndarray] = None,
1185
+ output_attentions: Optional[bool] = None,
1186
+ output_hidden_states: Optional[bool] = None,
1187
+ return_dict: Optional[bool] = None,
1188
+ train: bool = False,
1189
+ params: dict = None,
1190
+ dropout_rng: PRNGKey = None,
1191
+ ):
1192
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1193
+ output_hidden_states = (
1194
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1195
+ )
1196
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1197
+
1198
+ # prepare encoder inputs
1199
+ if attention_mask is None:
1200
+ attention_mask = jnp.ones_like(input_ids)
1201
+ if position_ids is None:
1202
+ batch_size, sequence_length = input_ids.shape
1203
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
1204
+
1205
+ # prepare decoder inputs
1206
+ if decoder_input_ids is None:
1207
+ decoder_input_ids = shift_tokens_right(
1208
+ input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id
1209
+ )
1210
+ if decoder_attention_mask is None:
1211
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
1212
+ if decoder_position_ids is None:
1213
+ batch_size, sequence_length = decoder_input_ids.shape
1214
+ decoder_position_ids = jnp.broadcast_to(
1215
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
1216
+ )
1217
+
1218
+ # Handle any PRNG if needed
1219
+ rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
1220
+
1221
+ return self.module.apply(
1222
+ {"params": params or self.params},
1223
+ input_ids=jnp.array(input_ids, dtype="i4"),
1224
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
1225
+ position_ids=jnp.array(position_ids, dtype="i4"),
1226
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
1227
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
1228
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
1229
+ output_attentions=output_attentions,
1230
+ output_hidden_states=output_hidden_states,
1231
+ return_dict=return_dict,
1232
+ deterministic=not train,
1233
+ rngs=rngs,
1234
+ )
1235
+
1236
+
1237
+ @add_start_docstrings(
1238
+ "The bare Bart Model transformer outputting raw hidden-states without any specific head on top.",
1239
+ BART_START_DOCSTRING,
1240
+ )
1241
+ class FlaxBartModel(FlaxBartPreTrainedModel):
1242
+ config: BartConfig
1243
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
1244
+ module_class = FlaxBartModule
1245
+
1246
+
1247
+ append_call_sample_docstring(FlaxBartModel, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC)
1248
+
1249
+
1250
+ class FlaxBartForConditionalGenerationModule(nn.Module):
1251
+ config: BartConfig
1252
+ dtype: jnp.dtype = jnp.float32
1253
+ bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
1254
+
1255
+ def setup(self):
1256
+ self.model = FlaxBartModule(config=self.config, dtype=self.dtype)
1257
+ self.lm_head = nn.Dense(
1258
+ self.model.shared.num_embeddings,
1259
+ use_bias=False,
1260
+ dtype=self.dtype,
1261
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
1262
+ )
1263
+ self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings))
1264
+
1265
+ def _get_encoder_module(self):
1266
+ return self.model.encoder
1267
+
1268
+ def _get_decoder_module(self):
1269
+ return self.model.decoder
1270
+
1271
+ def __call__(
1272
+ self,
1273
+ input_ids,
1274
+ attention_mask,
1275
+ decoder_input_ids,
1276
+ decoder_attention_mask,
1277
+ position_ids,
1278
+ decoder_position_ids,
1279
+ output_attentions: bool = False,
1280
+ output_hidden_states: bool = False,
1281
+ return_dict: bool = True,
1282
+ deterministic: bool = True,
1283
+ ):
1284
+ outputs = self.model(
1285
+ input_ids=input_ids,
1286
+ attention_mask=attention_mask,
1287
+ decoder_input_ids=decoder_input_ids,
1288
+ decoder_attention_mask=decoder_attention_mask,
1289
+ position_ids=position_ids,
1290
+ decoder_position_ids=decoder_position_ids,
1291
+ output_attentions=output_attentions,
1292
+ output_hidden_states=output_hidden_states,
1293
+ return_dict=return_dict,
1294
+ deterministic=deterministic,
1295
+ )
1296
+
1297
+ hidden_states = outputs[0]
1298
+
1299
+ if self.config.tie_word_embeddings:
1300
+ shared_embedding = self.model.variables["params"]["shared"]["embedding"]
1301
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
1302
+ else:
1303
+ lm_logits = self.lm_head(hidden_states)
1304
+
1305
+ lm_logits += jax.lax.stop_gradient(self.final_logits_bias.astype(self.dtype))
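+ # final_logits_bias is registered as a parameter (presumably for parity with the PyTorch BART
+ # checkpoints) but stop_gradient keeps it out of gradient updates.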
1306
+
1307
+ if not return_dict:
1308
+ output = (lm_logits,) + outputs[1:]
1309
+ return output
1310
+
1311
+ return FlaxSeq2SeqLMOutput(
1312
+ logits=lm_logits,
1313
+ decoder_hidden_states=outputs.decoder_hidden_states,
1314
+ decoder_attentions=outputs.decoder_attentions,
1315
+ cross_attentions=outputs.cross_attentions,
1316
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1317
+ encoder_hidden_states=outputs.encoder_hidden_states,
1318
+ encoder_attentions=outputs.encoder_attentions,
1319
+ )
1320
+
1321
+
1322
+ @add_start_docstrings(
1323
+ "The BART Model with a language modeling head. Can be used for summarization.", BART_START_DOCSTRING
1324
+ )
1325
+ class FlaxBartForConditionalGeneration(FlaxBartPreTrainedModel):
1326
+ module_class = FlaxBartForConditionalGenerationModule
1327
+ dtype: jnp.dtype = jnp.float32
1328
+
1329
+ @add_start_docstrings(BART_DECODE_INPUTS_DOCSTRING)
1330
+ @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=BartConfig)
1331
+ def decode(
1332
+ self,
1333
+ decoder_input_ids,
1334
+ encoder_outputs,
1335
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1336
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
1337
+ decoder_position_ids: Optional[jnp.ndarray] = None,
1338
+ past_key_values: dict = None,
1339
+ output_attentions: Optional[bool] = None,
1340
+ output_hidden_states: Optional[bool] = None,
1341
+ return_dict: Optional[bool] = None,
1342
+ train: bool = False,
1343
+ params: dict = None,
1344
+ dropout_rng: PRNGKey = None,
1345
+ ):
1346
+ r"""
1347
+ Returns:
1348
+
1349
+ Example:
1350
+
1351
+ ```python
1352
+ >>> import jax.numpy as jnp
1353
+ >>> from transformers import AutoTokenizer, FlaxBartForConditionalGeneration
1354
+
1355
+ >>> model = FlaxBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")
1356
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
1357
+
1358
+ >>> text = "My friends are cool but they eat too many carbs."
1359
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
1360
+ >>> encoder_outputs = model.encode(**inputs)
1361
+
1362
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
1363
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
1364
+
1365
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
1366
+ >>> logits = outputs.logits
1367
+ ```"""
1368
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1369
+ output_hidden_states = (
1370
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1371
+ )
1372
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1373
+
1374
+ encoder_hidden_states = encoder_outputs[0]
1375
+ if encoder_attention_mask is None:
1376
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
1377
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
1378
+
1379
+ batch_size, sequence_length = decoder_input_ids.shape
1380
+ if decoder_attention_mask is None:
1381
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
1382
+
1383
+ if decoder_position_ids is None:
1384
+ if past_key_values is not None:
1385
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
1386
+
1387
+ decoder_position_ids = jnp.broadcast_to(
1388
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
1389
+ )
1390
+
1391
+ # Handle any PRNG if needed
1392
+ rngs = {}
1393
+ if dropout_rng is not None:
1394
+ rngs["dropout"] = dropout_rng
1395
+
1396
+ inputs = {"params": params or self.params}
1397
+
1398
+ # If past_key_values are passed, the cache is already initialized and the private flag init_cache has to
1399
+ # be passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can
1400
+ # be updated by the FlaxBartAttention module.
1401
+ if past_key_values:
1402
+ inputs["cache"] = past_key_values
1403
+ mutable = ["cache"]
1404
+ else:
1405
+ mutable = False
1406
+
1407
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
1408
+ decoder_module = module._get_decoder_module()
1409
+ outputs = decoder_module(
1410
+ decoder_input_ids,
1411
+ decoder_attention_mask,
1412
+ decoder_position_ids,
1413
+ **kwargs,
1414
+ )
1415
+ hidden_states = outputs[0]
1416
+
1417
+ if self.config.tie_word_embeddings:
1418
+ shared_embedding = module.model.variables["params"]["shared"]["embedding"]
1419
+ lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
1420
+ else:
1421
+ lm_logits = module.lm_head(hidden_states)
1422
+
1423
+ lm_logits += module.final_logits_bias.astype(self.dtype)
1424
+ return lm_logits, outputs
1425
+
1426
+ outputs = self.module.apply(
1427
+ inputs,
1428
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
1429
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
1430
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
1431
+ encoder_hidden_states=encoder_hidden_states,
1432
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
1433
+ output_attentions=output_attentions,
1434
+ output_hidden_states=output_hidden_states,
1435
+ return_dict=return_dict,
1436
+ deterministic=not train,
1437
+ rngs=rngs,
1438
+ mutable=mutable,
1439
+ method=_decoder_forward,
1440
+ )
1441
+
1442
+ if past_key_values is None:
1443
+ lm_logits, decoder_outputs = outputs
1444
+ else:
1445
+ (lm_logits, decoder_outputs), past = outputs
1446
+
1447
+ if return_dict:
1448
+ outputs = FlaxCausalLMOutputWithCrossAttentions(
1449
+ logits=lm_logits,
1450
+ hidden_states=decoder_outputs.hidden_states,
1451
+ attentions=decoder_outputs.attentions,
1452
+ cross_attentions=decoder_outputs.cross_attentions,
1453
+ )
1454
+ else:
1455
+ outputs = (lm_logits,) + decoder_outputs[1:]
1456
+
1457
+ # add updated cache to model output
1458
+ if past_key_values is not None and return_dict:
1459
+ outputs["past_key_values"] = unfreeze(past["cache"])
1460
+ return outputs
1461
+ elif past_key_values is not None and not return_dict:
1462
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
1463
+
1464
+ return outputs
1465
+
1466
+ def prepare_inputs_for_generation(
1467
+ self,
1468
+ decoder_input_ids,
1469
+ max_length,
1470
+ attention_mask: Optional[jax.Array] = None,
1471
+ decoder_attention_mask: Optional[jax.Array] = None,
1472
+ encoder_outputs=None,
1473
+ **kwargs,
1474
+ ):
1475
+ # initializing the cache
1476
+ batch_size, seq_length = decoder_input_ids.shape
1477
+
1478
+ past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
1479
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
1480
+ # But since the decoder uses a causal mask, those positions are masked anyway.
1481
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation
1482
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
1483
+ if decoder_attention_mask is not None:
1484
+ position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
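+ # cumsum(-1) - 1 assigns position k - 1 to the k-th attended token, so position ids stay
+ # consistent with however the decoder_attention_mask is padded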
1485
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
1486
+ else:
1487
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
1488
+
1489
+ return {
1490
+ "past_key_values": past_key_values,
1491
+ "encoder_outputs": encoder_outputs,
1492
+ "encoder_attention_mask": attention_mask,
1493
+ "decoder_attention_mask": extended_attention_mask,
1494
+ "decoder_position_ids": position_ids,
1495
+ }
1496
+
1497
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
1498
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
1499
+ model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
1500
+ return model_kwargs
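+ # Between generation steps only the cache and the next position id change; advancing the last
+ # position id by 1 keeps decoding aligned with the cache index.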
1501
+
1502
+
1503
+ FLAX_BART_CONDITIONAL_GENERATION_DOCSTRING = """
1504
+ Returns:
1505
+
1506
+ Summarization example:
1507
+
1508
+ ```python
1509
+ >>> from transformers import AutoTokenizer, FlaxBartForConditionalGeneration
1510
+
1511
+ >>> model = FlaxBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")
1512
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
1513
+
1514
+ >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
1515
+ >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="np")
1516
+
1517
+ >>> # Generate Summary
1518
+ >>> summary_ids = model.generate(inputs["input_ids"]).sequences
1519
+ >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))
1520
+ ```
1521
+
1522
+ Mask filling example:
1523
+
1524
+ ```python
1525
+ >>> import jax
1526
+ >>> from transformers import AutoTokenizer, FlaxBartForConditionalGeneration
1527
+
1528
+ >>> model = FlaxBartForConditionalGeneration.from_pretrained("facebook/bart-large")
1529
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")
1530
+
1531
+ >>> TXT = "My friends are <mask> but they eat too many carbs."
1532
+ >>> input_ids = tokenizer([TXT], return_tensors="jax")["input_ids"]
1533
+
1534
+ >>> logits = model(input_ids).logits
1535
+ >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero()[0].item()
1536
+ >>> probs = jax.nn.softmax(logits[0, masked_index], axis=0)
1537
+ >>> values, predictions = jax.lax.top_k(probs, k=1)
1538
+
1539
+ >>> tokenizer.decode(predictions).split()
1540
+ ```
1541
+ """
1542
+
1543
+ overwrite_call_docstring(
1544
+ FlaxBartForConditionalGeneration, BART_INPUTS_DOCSTRING + FLAX_BART_CONDITIONAL_GENERATION_DOCSTRING
1545
+ )
1546
+ append_replace_return_docstrings(
1547
+ FlaxBartForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC
1548
+ )
1549
+
1550
+
1551
+ class FlaxBartForSequenceClassificationModule(nn.Module):
1552
+ config: BartConfig
1553
+ dtype: jnp.dtype = jnp.float32
1554
+ num_labels: Optional[int] = None
1555
+
1556
+ def setup(self):
1557
+ self.model = FlaxBartModule(config=self.config, dtype=self.dtype)
1558
+ self.classification_head = FlaxBartClassificationHead(
1559
+ config=self.config,
1560
+ inner_dim=self.config.d_model,
1561
+ num_classes=self.num_labels if self.num_labels is not None else self.config.num_labels,
1562
+ pooler_dropout=self.config.classifier_dropout,
1563
+ )
1564
+
1565
+ def _get_encoder_module(self):
1566
+ return self.model.encoder
1567
+
1568
+ def _get_decoder_module(self):
1569
+ return self.model.decoder
1570
+
1571
+ def __call__(
1572
+ self,
1573
+ input_ids,
1574
+ attention_mask,
1575
+ decoder_input_ids,
1576
+ decoder_attention_mask,
1577
+ position_ids,
1578
+ decoder_position_ids,
1579
+ output_attentions: bool = False,
1580
+ output_hidden_states: bool = False,
1581
+ return_dict: bool = True,
1582
+ deterministic: bool = True,
1583
+ ):
1584
+ outputs = self.model(
1585
+ input_ids=input_ids,
1586
+ attention_mask=attention_mask,
1587
+ decoder_input_ids=decoder_input_ids,
1588
+ decoder_attention_mask=decoder_attention_mask,
1589
+ position_ids=position_ids,
1590
+ decoder_position_ids=decoder_position_ids,
1591
+ output_attentions=output_attentions,
1592
+ output_hidden_states=output_hidden_states,
1593
+ return_dict=return_dict,
1594
+ deterministic=deterministic,
1595
+ )
1596
+
1597
+ hidden_states = outputs[0] # last hidden state
1598
+
1599
+ eos_mask = jnp.where(input_ids == self.config.eos_token_id, 1, 0)
1600
+
1601
+ # The first condition is necessary to overcome jax._src.errors.ConcretizationTypeError during JIT compilation
1602
+ if type(eos_mask) != jax.interpreters.partial_eval.DynamicJaxprTracer:
1603
+ if len(jnp.unique(eos_mask.sum(1))) > 1:
1604
+ raise ValueError("All examples must have the same number of <eos> tokens.")
1605
+
1606
+ if any(eos_mask.sum(1) == 0):
1607
+ raise ValueError("There are missing <eos> tokens in input_ids")
1608
+
1609
+ # Ensure to keep 1 only for the last <eos> token for each example
1610
+ eos_mask_noised = eos_mask + jnp.arange(eos_mask.shape[1]) * 1e-6
1611
+ eos_mask = jnp.where(eos_mask_noised == eos_mask_noised.max(1).reshape(-1, 1), 1, 0)
1612
+
1613
+ sentence_representation = jnp.einsum("ijk, ij -> ijk", hidden_states, eos_mask).sum(1)
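+ # the einsum zeroes every position except the (last) <eos> token, so the sum pools exactly that
+ # token's hidden state as the sentence representation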
1614
+ logits = self.classification_head(sentence_representation, deterministic=deterministic)
1615
+
1616
+ if not return_dict:
1617
+ output = (logits,) + outputs[1:]
1618
+ return output
1619
+
1620
+ return FlaxSeq2SeqSequenceClassifierOutput(
1621
+ logits=logits,
1622
+ decoder_hidden_states=outputs.decoder_hidden_states,
1623
+ decoder_attentions=outputs.decoder_attentions,
1624
+ cross_attentions=outputs.cross_attentions,
1625
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1626
+ encoder_hidden_states=outputs.encoder_hidden_states,
1627
+ encoder_attentions=outputs.encoder_attentions,
1628
+ )
1629
+
1630
+
1631
+ @add_start_docstrings(
1632
+ """
1633
+ Bart model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for GLUE
1634
+ tasks.
1635
+ """,
1636
+ BART_START_DOCSTRING,
1637
+ )
1638
+ class FlaxBartForSequenceClassification(FlaxBartPreTrainedModel):
1639
+ module_class = FlaxBartForSequenceClassificationModule
1640
+ dtype = jnp.float32
1641
+
1642
+
1643
+ append_call_sample_docstring(
1644
+ FlaxBartForSequenceClassification,
1645
+ _CHECKPOINT_FOR_DOC,
1646
+ FlaxSeq2SeqSequenceClassifierOutput,
1647
+ _CONFIG_FOR_DOC,
1648
+ )
1649
+
1650
+
1651
+ class FlaxBartForQuestionAnsweringModule(nn.Module):
1652
+ config: BartConfig
1653
+ dtype: jnp.dtype = jnp.float32
1654
+ num_labels = 2
1655
+
1656
+ def setup(self):
1657
+ self.model = FlaxBartModule(config=self.config, dtype=self.dtype)
1658
+ self.qa_outputs = nn.Dense(
1659
+ self.num_labels, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
1660
+ )
1661
+
1662
+ def _get_encoder_module(self):
1663
+ return self.model.encoder
1664
+
1665
+ def _get_decoder_module(self):
1666
+ return self.model.decoder
1667
+
1668
+ def __call__(
1669
+ self,
1670
+ input_ids,
1671
+ attention_mask,
1672
+ decoder_input_ids,
1673
+ decoder_attention_mask,
1674
+ position_ids,
1675
+ decoder_position_ids,
1676
+ output_attentions: bool = False,
1677
+ output_hidden_states: bool = False,
1678
+ return_dict: bool = True,
1679
+ deterministic: bool = True,
1680
+ ):
1681
+ outputs = self.model(
1682
+ input_ids=input_ids,
1683
+ attention_mask=attention_mask,
1684
+ decoder_input_ids=decoder_input_ids,
1685
+ decoder_attention_mask=decoder_attention_mask,
1686
+ position_ids=position_ids,
1687
+ decoder_position_ids=decoder_position_ids,
1688
+ output_attentions=output_attentions,
1689
+ output_hidden_states=output_hidden_states,
1690
+ return_dict=return_dict,
1691
+ deterministic=deterministic,
1692
+ )
1693
+
1694
+ sequence_output = outputs[0]
1695
+
1696
+ logits = self.qa_outputs(sequence_output)
1697
+ start_logits, end_logits = jnp.split(logits, logits.shape[-1], axis=-1)
1698
+ start_logits = start_logits.squeeze(-1)
1699
+ end_logits = end_logits.squeeze(-1)
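+ # qa_outputs emits 2 values per token; the split yields per-token start and end span logits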
1700
+
1701
+ if not return_dict:
1702
+ output = (start_logits, end_logits) + outputs[1:]
1703
+ return output
1704
+
1705
+ return FlaxSeq2SeqQuestionAnsweringModelOutput(
1706
+ start_logits=start_logits,
1707
+ end_logits=end_logits,
1708
+ decoder_hidden_states=outputs.decoder_hidden_states,
1709
+ decoder_attentions=outputs.decoder_attentions,
1710
+ cross_attentions=outputs.cross_attentions,
1711
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1712
+ encoder_hidden_states=outputs.encoder_hidden_states,
1713
+ encoder_attentions=outputs.encoder_attentions,
1714
+ )
1715
+
1716
+
1717
+ @add_start_docstrings(
1718
+ """
1719
+ BART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1720
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1721
+ """,
1722
+ BART_START_DOCSTRING,
1723
+ )
1724
+ class FlaxBartForQuestionAnswering(FlaxBartPreTrainedModel):
1725
+ module_class = FlaxBartForQuestionAnsweringModule
1726
+ dtype = jnp.float32
1727
+
1728
+
1729
+ append_call_sample_docstring(
1730
+ FlaxBartForQuestionAnswering,
1731
+ _CHECKPOINT_FOR_DOC,
1732
+ FlaxSeq2SeqQuestionAnsweringModelOutput,
1733
+ _CONFIG_FOR_DOC,
1734
+ )
1735
+
1736
+
1737
+ class FlaxBartDecoderPreTrainedModel(FlaxPreTrainedModel):
1738
+ config_class = BartConfig
1739
+ base_model_prefix: str = "model"
1740
+ module_class: nn.Module = None
1741
+
1742
+ def __init__(
1743
+ self,
1744
+ config: BartConfig,
1745
+ input_shape: Tuple[int] = (1, 1),
1746
+ seed: int = 0,
1747
+ dtype: jnp.dtype = jnp.float32,
1748
+ _do_init: bool = True,
1749
+ **kwargs,
1750
+ ):
1751
+ config.is_decoder = True
1752
+ config.is_encoder_decoder = False
1753
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
1754
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
1755
+
1756
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
1757
+ # init input tensors
1758
+ input_ids = jnp.zeros(input_shape, dtype="i4")
1759
+ attention_mask = jnp.ones_like(input_ids)
1760
+
1761
+ batch_size, sequence_length = input_ids.shape
1762
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
1763
+
1764
+ params_rng, dropout_rng = jax.random.split(rng)
1765
+ rngs = {"params": params_rng, "dropout": dropout_rng}
1766
+ encoder_hidden_states = jnp.zeros(input_shape + (self.config.d_model,))
1767
+ encoder_attention_mask = attention_mask
1768
+ module_init_outputs = self.module.init(
1769
+ rngs,
1770
+ input_ids,
1771
+ attention_mask,
1772
+ position_ids,
1773
+ encoder_hidden_states,
1774
+ encoder_attention_mask,
1775
+ return_dict=False,
1776
+ )
1777
+ return module_init_outputs["params"]
1778
+
1779
+ def init_cache(self, batch_size, max_length):
1780
+ r"""
1781
+ Args:
1782
+ batch_size (`int`):
1783
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
1784
+ max_length (`int`):
1785
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
1786
+ cache.
1787
+ """
1788
+ # init input variables to retrieve cache
1789
+ input_ids = jnp.ones((batch_size, max_length), dtype="i4")
1790
+ attention_mask = jnp.ones_like(input_ids, dtype="i4")
1791
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
1792
+
1793
+ init_variables = self.module.init(
1794
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
1795
+ )
1796
+ return unfreeze(init_variables["cache"])
1797
+
1798
+ @add_start_docstrings_to_model_forward(BART_DECODE_INPUTS_DOCSTRING)
1799
+ def __call__(
1800
+ self,
1801
+ input_ids: jnp.ndarray,
1802
+ attention_mask: Optional[jnp.ndarray] = None,
1803
+ position_ids: Optional[jnp.ndarray] = None,
1804
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
1805
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1806
+ output_attentions: Optional[bool] = None,
1807
+ output_hidden_states: Optional[bool] = None,
1808
+ return_dict: Optional[bool] = None,
1809
+ train: bool = False,
1810
+ params: dict = None,
1811
+ past_key_values: dict = None,
1812
+ dropout_rng: PRNGKey = None,
1813
+ ):
1814
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1815
+ output_hidden_states = (
1816
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1817
+ )
1818
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1819
+
1820
+ if encoder_hidden_states is not None and encoder_attention_mask is None:
1821
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
1822
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
1823
+
1824
+ # prepare decoder inputs
1825
+ if attention_mask is None:
1826
+ attention_mask = jnp.ones_like(input_ids)
1827
+ if position_ids is None:
1828
+ batch_size, sequence_length = input_ids.shape
1829
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
1830
+
1831
+ # Handle any PRNG if needed
1832
+ rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
1833
+
1834
+ inputs = {"params": params or self.params}
1835
+
1836
+ # if past_key_values are passed, the cache is already initialized; a private flag init_cache has to be
1837
+ # passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can be
1838
+ # updated by the FlaxBartAttention module
1839
+ if past_key_values:
1840
+ inputs["cache"] = past_key_values
1841
+ mutable = ["cache"]
1842
+ else:
1843
+ mutable = False
1844
+
1845
+ outputs = self.module.apply(
1846
+ inputs,
1847
+ input_ids=jnp.array(input_ids, dtype="i4"),
1848
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
1849
+ position_ids=jnp.array(position_ids, dtype="i4"),
1850
+ encoder_hidden_states=encoder_hidden_states,
1851
+ encoder_attention_mask=encoder_attention_mask,
1852
+ output_attentions=output_attentions,
1853
+ output_hidden_states=output_hidden_states,
1854
+ return_dict=return_dict,
1855
+ deterministic=not train,
1856
+ rngs=rngs,
1857
+ mutable=mutable,
1858
+ )
1859
+
1860
+ # add updated cache to model output
1861
+ if past_key_values is not None and return_dict:
1862
+ outputs, past_key_values = outputs
1863
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
1864
+ return outputs
1865
+ elif past_key_values is not None and not return_dict:
1866
+ outputs, past_key_values = outputs
1867
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
1868
+
1869
+ return outputs
1870
+
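The decoder-only pretrained model can also be driven directly with externally supplied encoder states (the pattern used when it is plugged into an encoder-decoder framework). A minimal sketch, using the concrete `FlaxBartForCausalLM` subclass defined further below; the tiny config, token ids, and zero-valued encoder states are purely illustrative.

```python
# Sketch only: randomly initialized tiny model, zeros standing in for encoder output.
import jax.numpy as jnp
from transformers import BartConfig, FlaxBartForCausalLM  # concrete subclass defined below

config = BartConfig(
    vocab_size=128, d_model=16, decoder_layers=1, decoder_attention_heads=2,
    decoder_ffn_dim=32, max_position_embeddings=64,
)
model = FlaxBartForCausalLM(config)

input_ids = jnp.array([[0, 42, 7]], dtype="i4")
encoder_hidden_states = jnp.zeros((1, 5, config.d_model))  # stand-in encoder output
encoder_attention_mask = jnp.ones((1, 5), dtype="i4")

outputs = model(
    input_ids,
    encoder_hidden_states=encoder_hidden_states,
    encoder_attention_mask=encoder_attention_mask,
)
print(outputs.logits.shape)  # (1, 3, 128)
```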
1871
+
1872
+ class FlaxBartDecoderWrapper(nn.Module):
1873
+ """
1874
+ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
1875
+ used in combination with the [`EncoderDecoderModel`] framework.
1876
+ """
1877
+
1878
+ config: BartConfig
1879
+ dtype: jnp.dtype = jnp.float32
1880
+
1881
+ def setup(self):
1882
+ embed_dim = self.config.d_model
1883
+ embed_tokens = nn.Embed(
1884
+ self.config.vocab_size,
1885
+ embed_dim,
1886
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
1887
+ dtype=self.dtype,
1888
+ )
1889
+ self.decoder = FlaxBartDecoder(config=self.config, embed_tokens=embed_tokens, dtype=self.dtype)
1890
+
1891
+ def __call__(self, *args, **kwargs):
1892
+ return self.decoder(*args, **kwargs)
1893
+
1894
+
1895
+ class FlaxBartForCausalLMModule(nn.Module):
1896
+ config: BartConfig
1897
+ dtype: jnp.dtype = jnp.float32
1898
+
1899
+ def setup(self):
1900
+ self.model = FlaxBartDecoderWrapper(config=self.config, dtype=self.dtype)
1901
+ self.lm_head = nn.Dense(
1902
+ self.config.vocab_size,
1903
+ use_bias=False,
1904
+ dtype=self.dtype,
1905
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
1906
+ )
1907
+
1908
+ def __call__(
1909
+ self,
1910
+ input_ids,
1911
+ attention_mask,
1912
+ position_ids,
1913
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
1914
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1915
+ init_cache: bool = False,
1916
+ output_attentions: bool = False,
1917
+ output_hidden_states: bool = False,
1918
+ return_dict: bool = True,
1919
+ deterministic: bool = True,
1920
+ ):
1921
+ outputs = self.model(
1922
+ input_ids,
1923
+ attention_mask,
1924
+ position_ids,
1925
+ encoder_hidden_states,
1926
+ encoder_attention_mask,
1927
+ deterministic=deterministic,
1928
+ init_cache=init_cache,
1929
+ output_attentions=output_attentions,
1930
+ output_hidden_states=output_hidden_states,
1931
+ return_dict=return_dict,
1932
+ )
1933
+
1934
+ hidden_states = outputs[0]
1935
+
1936
+ if self.config.tie_word_embeddings:
1937
+ shared_embedding = self.model.variables["params"]["decoder"]["embed_tokens"]["embedding"]
1938
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
1939
+ else:
1940
+ lm_logits = self.lm_head(hidden_states)
1941
+
1942
+ if not return_dict:
1943
+ return (lm_logits,) + outputs[1:]
1944
+
1945
+ return FlaxCausalLMOutputWithCrossAttentions(
1946
+ logits=lm_logits,
1947
+ hidden_states=outputs.hidden_states,
1948
+ attentions=outputs.attentions,
1949
+ cross_attentions=outputs.cross_attentions,
1950
+ )
1951
+
1952
+
1953
+ @add_start_docstrings(
1954
+ """
1955
+ Bart Decoder Model with a language modeling head on top (linear layer with weights tied to the input embeddings)
1956
+ e.g for autoregressive tasks.
1957
+ """,
1958
+ BART_START_DOCSTRING,
1959
+ )
1960
+ class FlaxBartForCausalLM(FlaxBartDecoderPreTrainedModel):
1961
+ module_class = FlaxBartForCausalLMModule
1962
+
1963
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
1964
+ # initializing the cache
1965
+ batch_size, seq_length = input_ids.shape
1966
+
1967
+ past_key_values = self.init_cache(batch_size, max_length)
1968
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
1969
+ # But since the decoder uses a causal mask, those positions are masked anyway.
1970
+ # Thus, we can create a single static attention_mask here, which is more efficient for compilation
1971
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
1972
+ if attention_mask is not None:
1973
+ position_ids = attention_mask.cumsum(axis=-1) - 1
1974
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
1975
+ else:
1976
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
1977
+
1978
+ return {
1979
+ "past_key_values": past_key_values,
1980
+ "attention_mask": extended_attention_mask,
1981
+ "position_ids": position_ids,
1982
+ }
1983
+
1984
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
1985
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
1986
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
1987
+ return model_kwargs
1988
+
1989
+
1990
+ append_call_sample_docstring(
1991
+ FlaxBartForCausalLM,
1992
+ _CHECKPOINT_FOR_DOC,
1993
+ FlaxCausalLMOutputWithCrossAttentions,
1994
+ _CONFIG_FOR_DOC,
1995
+ )
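The two helpers above are what `generate()` calls around each forward pass: `prepare_inputs_for_generation` builds the cache and the static attention mask once, and `update_inputs_for_generation` threads the updated cache and position ids into the next step. A hand-rolled sketch of one such step, with a tiny randomly initialized model (all names and sizes below are illustrative):

```python
# Sketch of one cached decoding step; a real run would loop until max_length / EOS.
import jax.numpy as jnp
from transformers import BartConfig, FlaxBartForCausalLM

config = BartConfig(vocab_size=128, d_model=16, decoder_layers=1,
                    decoder_attention_heads=2, decoder_ffn_dim=32, max_position_embeddings=32)
model = FlaxBartForCausalLM(config)

input_ids = jnp.array([[0, 5, 9]], dtype="i4")
max_length = 8

model_kwargs = model.prepare_inputs_for_generation(input_ids, max_length)
outputs = model(input_ids, **model_kwargs)        # primes the cache with the full prompt
next_token = jnp.argmax(outputs.logits[:, -1, :], axis=-1)[:, None]

model_kwargs = model.update_inputs_for_generation(outputs, model_kwargs)
outputs = model(next_token, **model_kwargs)       # single-token step reusing the cache
```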
llmeval-env/lib/python3.10/site-packages/transformers/models/bart/modeling_tf_bart.py ADDED
@@ -0,0 +1,1712 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 Bart model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import random
21
+ from typing import Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import tensorflow as tf
25
+
26
+ from ...activations_tf import get_tf_activation
27
+ from ...modeling_tf_outputs import (
28
+ TFBaseModelOutput,
29
+ TFBaseModelOutputWithPastAndCrossAttentions,
30
+ TFSeq2SeqLMOutput,
31
+ TFSeq2SeqModelOutput,
32
+ TFSeq2SeqSequenceClassifierOutput,
33
+ )
34
+
35
+ # Public API
36
+ from ...modeling_tf_utils import (
37
+ TFCausalLanguageModelingLoss,
38
+ TFModelInputType,
39
+ TFPreTrainedModel,
40
+ TFSequenceClassificationLoss,
41
+ keras,
42
+ keras_serializable,
43
+ unpack_inputs,
44
+ )
45
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
46
+ from ...utils import (
47
+ add_code_sample_docstrings,
48
+ add_end_docstrings,
49
+ add_start_docstrings,
50
+ add_start_docstrings_to_model_forward,
51
+ logging,
52
+ replace_return_docstrings,
53
+ )
54
+ from .configuration_bart import BartConfig
55
+
56
+
57
+ logger = logging.get_logger(__name__)
58
+
59
+ _CHECKPOINT_FOR_DOC = "facebook/bart-large"
60
+ _CONFIG_FOR_DOC = "BartConfig"
61
+
62
+
63
+ LARGE_NEGATIVE = -1e8
64
+
65
+
66
+ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
67
+ pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
68
+ decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
69
+ start_tokens = tf.fill(
70
+ (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)
71
+ )
72
+ shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
73
+ # replace possible -100 values in labels by `pad_token_id`
74
+ shifted_input_ids = tf.where(
75
+ shifted_input_ids == -100,
76
+ tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)),
77
+ shifted_input_ids,
78
+ )
79
+
80
+ # "Verify that `labels` has only positive values and -100"
81
+ assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
82
+
83
+ # Make sure the assertion op is called by wrapping the result in an identity no-op
84
+ with tf.control_dependencies([assert_gte0]):
85
+ shifted_input_ids = tf.identity(shifted_input_ids)
86
+
87
+ return shifted_input_ids
88
+
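A quick check of the shifting behaviour on toy ids (values are illustrative; `2` plays the role of `decoder_start_token_id`/EOS and `1` of `pad_token_id`, matching the BART defaults):

```python
import tensorflow as tf

labels = tf.constant([[10, 11, 2, -100, -100]])   # -100 marks ignored label positions
decoder_input_ids = shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=2)
print(decoder_input_ids.numpy())                  # [[ 2 10 11  2  1]] -> shifted right, -100 replaced by pad
```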
89
+
90
+ def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
91
+ """
92
+ Make causal mask used for bi-directional self-attention.
93
+ """
94
+ bsz = input_ids_shape[0]
95
+ tgt_len = input_ids_shape[1]
96
+ mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
97
+ mask_cond = tf.range(shape_list(mask)[-1])
98
+
99
+ mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
100
+
101
+ if past_key_values_length > 0:
102
+ mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
103
+
104
+ return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
105
+
106
+
107
+ def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
108
+ """
109
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
110
+ """
111
+ src_len = shape_list(mask)[1]
112
+ tgt_len = tgt_len if tgt_len is not None else src_len
113
+ one_cst = tf.constant(1.0)
114
+ mask = tf.cast(mask, dtype=one_cst.dtype)
115
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
116
+
117
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
118
+
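Taken together, these two helpers produce the additive masks the decoder later sums onto the attention scores. A small shape check (batch of 1, sequence length 4, last position padded):

```python
import tensorflow as tf

causal = _make_causal_mask([1, 4])                    # (1, 1, 4, 4): ~-1e8 above the diagonal
padding = _expand_mask(tf.constant([[1, 1, 1, 0]]))   # (1, 1, 4, 4): ~-1e8 in the last column
combined = causal + padding                           # additive mask applied to the attention scores
print(combined.shape)                                 # (1, 1, 4, 4)
```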
119
+
120
+ class TFBartLearnedPositionalEmbedding(keras.layers.Embedding):
121
+ """
122
+ This module learns positional embeddings up to a fixed maximum size.
123
+ """
124
+
125
+ def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs):
126
+ # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
127
+ # and adjust num_embeddings appropriately. Other models don't have this hack
128
+ self.offset = 2
129
+ super().__init__(num_embeddings + self.offset, embedding_dim, **kwargs)
130
+
131
+ def call(
132
+ self,
133
+ input_shape: Optional[tf.TensorShape] = None,
134
+ past_key_values_length: int = 0,
135
+ position_ids: tf.Tensor | None = None,
136
+ ):
137
+ """Input is expected to be of size [bsz x seqlen]."""
138
+ if position_ids is None:
139
+ seq_len = input_shape[1]
140
+ position_ids = tf.range(seq_len, delta=1, name="range")
141
+ position_ids += past_key_values_length
142
+
143
+ offset_dtype = position_ids.dtype if isinstance(position_ids, tf.Tensor) else tf.int32
144
+ return super().call(position_ids + tf.constant(self.offset, dtype=offset_dtype))
145
+
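In other words, a positional table with `num_embeddings + 2` rows is allocated and every position id is shifted by `offset` before the lookup. A small sketch (sizes are illustrative):

```python
import tensorflow as tf

# 1024 usable positions -> a table with 1026 rows is created internally.
embed_positions = TFBartLearnedPositionalEmbedding(num_embeddings=1024, embedding_dim=8)
positions = embed_positions(input_shape=tf.TensorShape([2, 5]))  # looks up rows 2..6 of the table
print(positions.shape)  # (5, 8), later broadcast-added to the (bsz, 5, d_model) token embeddings
```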
146
+
147
+ class TFBartAttention(keras.layers.Layer):
148
+ """Multi-headed attention from "Attention Is All You Need"""
149
+
150
+ def __init__(
151
+ self,
152
+ embed_dim: int,
153
+ num_heads: int,
154
+ dropout: float = 0.0,
155
+ is_decoder: bool = False,
156
+ bias: bool = True,
157
+ **kwargs,
158
+ ):
159
+ super().__init__(**kwargs)
160
+ self.embed_dim = embed_dim
161
+
162
+ self.num_heads = num_heads
163
+ self.dropout = keras.layers.Dropout(dropout)
164
+ self.head_dim = embed_dim // num_heads
165
+ if (self.head_dim * num_heads) != self.embed_dim:
166
+ raise ValueError(
167
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
168
+ f" and `num_heads`: {num_heads})."
169
+ )
170
+ self.scaling = self.head_dim**-0.5
171
+ self.is_decoder = is_decoder
172
+
173
+ self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
174
+ self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
175
+ self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
176
+ self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
177
+
178
+ def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
179
+ return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
180
+
181
+ def call(
182
+ self,
183
+ hidden_states: tf.Tensor,
184
+ key_value_states: tf.Tensor | None = None,
185
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
186
+ attention_mask: tf.Tensor | None = None,
187
+ layer_head_mask: tf.Tensor | None = None,
188
+ training: Optional[bool] = False,
189
+ ) -> Tuple[tf.Tensor, tf.Tensor | None]:
190
+ """Input shape: Batch x Time x Channel"""
191
+
192
+ # if key_value_states are provided this layer is used as a cross-attention layer
193
+ # for the decoder
194
+ is_cross_attention = key_value_states is not None
195
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
196
+
197
+ # get query proj
198
+ query_states = self.q_proj(hidden_states) * self.scaling
199
+ # get key, value proj
200
+ if is_cross_attention and past_key_value is not None:
201
+ # reuse k,v, cross_attentions
202
+ key_states = past_key_value[0]
203
+ value_states = past_key_value[1]
204
+ elif is_cross_attention:
205
+ # cross_attentions
206
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
207
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
208
+ elif past_key_value is not None:
209
+ # reuse k, v, self_attention
210
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
211
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
212
+ key_states = tf.concat([past_key_value[0], key_states], axis=2)
213
+ value_states = tf.concat([past_key_value[1], value_states], axis=2)
214
+ else:
215
+ # self_attention
216
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
217
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
218
+
219
+ if self.is_decoder:
220
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
221
+ # Further calls to cross_attention layer can then reuse all cross-attention
222
+ # key/value_states (first "if" case)
223
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
224
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
225
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
226
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
227
+ past_key_value = (key_states, value_states)
228
+
229
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
230
+ query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
231
+ key_states = tf.reshape(key_states, proj_shape)
232
+ value_states = tf.reshape(value_states, proj_shape)
233
+
234
+ src_len = shape_list(key_states)[1]
235
+ attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
236
+
237
+ tf.debugging.assert_equal(
238
+ shape_list(attn_weights),
239
+ [bsz * self.num_heads, tgt_len, src_len],
240
+ message=(
241
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
242
+ f" {shape_list(attn_weights)}"
243
+ ),
244
+ )
245
+
246
+ if attention_mask is not None:
247
+ tf.debugging.assert_equal(
248
+ shape_list(attention_mask),
249
+ [bsz, 1, tgt_len, src_len],
250
+ message=(
251
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
252
+ f" {shape_list(attention_mask)}"
253
+ ),
254
+ )
255
+
256
+ attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
257
+ attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
258
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
259
+
260
+ attn_weights = stable_softmax(attn_weights, axis=-1)
261
+
262
+ if layer_head_mask is not None:
263
+ tf.debugging.assert_equal(
264
+ shape_list(layer_head_mask),
265
+ [self.num_heads],
266
+ message=(
267
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
268
+ f" {shape_list(layer_head_mask)}"
269
+ ),
270
+ )
271
+
272
+ attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
273
+ attn_weights, (bsz, self.num_heads, tgt_len, src_len)
274
+ )
275
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
276
+
277
+ attn_probs = self.dropout(attn_weights, training=training)
278
+ attn_output = tf.matmul(attn_probs, value_states)
279
+
280
+ tf.debugging.assert_equal(
281
+ shape_list(attn_output),
282
+ [bsz * self.num_heads, tgt_len, self.head_dim],
283
+ message=(
284
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
285
+ f" {shape_list(attn_output)}"
286
+ ),
287
+ )
288
+
289
+ attn_output = tf.transpose(
290
+ tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
291
+ )
292
+ attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
293
+
294
+ attn_output = self.out_proj(attn_output)
295
+ attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
296
+
297
+ return attn_output, attn_weights, past_key_value
298
+
299
+ def build(self, input_shape=None):
300
+ if self.built:
301
+ return
302
+ self.built = True
303
+ if getattr(self, "k_proj", None) is not None:
304
+ with tf.name_scope(self.k_proj.name):
305
+ self.k_proj.build([None, None, self.embed_dim])
306
+ if getattr(self, "q_proj", None) is not None:
307
+ with tf.name_scope(self.q_proj.name):
308
+ self.q_proj.build([None, None, self.embed_dim])
309
+ if getattr(self, "v_proj", None) is not None:
310
+ with tf.name_scope(self.v_proj.name):
311
+ self.v_proj.build([None, None, self.embed_dim])
312
+ if getattr(self, "out_proj", None) is not None:
313
+ with tf.name_scope(self.out_proj.name):
314
+ self.out_proj.build([None, None, self.embed_dim])
315
+
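A shape-level sanity check of the layer above, run eagerly on random values (all sizes illustrative); with `is_decoder=True` the cached key/value states are returned as the third element:

```python
import tensorflow as tf

attn = TFBartAttention(embed_dim=16, num_heads=4, is_decoder=True)
hidden_states = tf.random.normal((2, 6, 16))
mask = _expand_mask(tf.ones((2, 6)))                   # all-ones mask -> zeros, i.e. nothing masked

attn_output, attn_weights, past_key_value = attn(hidden_states, attention_mask=mask)
print(attn_output.shape)        # (2, 6, 16)
print(attn_weights.shape)       # (2, 4, 6, 6)
print(past_key_value[0].shape)  # (2, 4, 6, 4) -- cached keys, one head_dim=4 slice per head
```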
316
+
317
+ class TFBartEncoderLayer(keras.layers.Layer):
318
+ def __init__(self, config: BartConfig, **kwargs):
319
+ super().__init__(**kwargs)
320
+ self.embed_dim = config.d_model
321
+ self.self_attn = TFBartAttention(
322
+ self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
323
+ )
324
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
325
+ self.dropout = keras.layers.Dropout(config.dropout)
326
+ self.activation_fn = get_tf_activation(config.activation_function)
327
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
328
+ self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
329
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
330
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
331
+ self.config = config
332
+
333
+ def call(
334
+ self,
335
+ hidden_states: tf.Tensor,
336
+ attention_mask: np.ndarray | tf.Tensor | None,
337
+ layer_head_mask: tf.Tensor | None,
338
+ training: Optional[bool] = False,
339
+ ) -> tf.Tensor:
340
+ """
341
+ Args:
342
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
343
+ attention_mask (`tf.Tensor`): attention mask of size
344
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
345
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
346
+ `(encoder_attention_heads,)`
347
+ """
348
+ residual = hidden_states
349
+ hidden_states, self_attn_weights, _ = self.self_attn(
350
+ hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask
351
+ )
352
+
353
+ tf.debugging.assert_equal(
354
+ shape_list(hidden_states),
355
+ shape_list(residual),
356
+ message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
357
+ )
358
+
359
+ hidden_states = self.dropout(hidden_states, training=training)
360
+ hidden_states = residual + hidden_states
361
+ hidden_states = self.self_attn_layer_norm(hidden_states)
362
+
363
+ residual = hidden_states
364
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
365
+ hidden_states = self.activation_dropout(hidden_states, training=training)
366
+ hidden_states = self.fc2(hidden_states)
367
+ hidden_states = self.dropout(hidden_states, training=training)
368
+ hidden_states = residual + hidden_states
369
+ hidden_states = self.final_layer_norm(hidden_states)
370
+
371
+ return hidden_states, self_attn_weights
372
+
373
+ def build(self, input_shape=None):
374
+ if self.built:
375
+ return
376
+ self.built = True
377
+ if getattr(self, "self_attn", None) is not None:
378
+ with tf.name_scope(self.self_attn.name):
379
+ self.self_attn.build(None)
380
+ if getattr(self, "self_attn_layer_norm", None) is not None:
381
+ with tf.name_scope(self.self_attn_layer_norm.name):
382
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
383
+ if getattr(self, "fc1", None) is not None:
384
+ with tf.name_scope(self.fc1.name):
385
+ self.fc1.build([None, None, self.embed_dim])
386
+ if getattr(self, "fc2", None) is not None:
387
+ with tf.name_scope(self.fc2.name):
388
+ self.fc2.build([None, None, self.config.encoder_ffn_dim])
389
+ if getattr(self, "final_layer_norm", None) is not None:
390
+ with tf.name_scope(self.final_layer_norm.name):
391
+ self.final_layer_norm.build([None, None, self.embed_dim])
392
+
393
+
394
+ class TFBartDecoderLayer(keras.layers.Layer):
395
+ def __init__(self, config: BartConfig, **kwargs):
396
+ super().__init__(**kwargs)
397
+ self.embed_dim = config.d_model
398
+ self.self_attn = TFBartAttention(
399
+ embed_dim=self.embed_dim,
400
+ num_heads=config.decoder_attention_heads,
401
+ dropout=config.attention_dropout,
402
+ name="self_attn",
403
+ is_decoder=True,
404
+ )
405
+ self.dropout = keras.layers.Dropout(config.dropout)
406
+ self.activation_fn = get_tf_activation(config.activation_function)
407
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
408
+
409
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
410
+ self.encoder_attn = TFBartAttention(
411
+ self.embed_dim,
412
+ config.decoder_attention_heads,
413
+ dropout=config.attention_dropout,
414
+ name="encoder_attn",
415
+ is_decoder=True,
416
+ )
417
+ self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
418
+ self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
419
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
420
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
421
+ self.config = config
422
+
423
+ def call(
424
+ self,
425
+ hidden_states: tf.Tensor,
426
+ attention_mask: np.ndarray | tf.Tensor | None = None,
427
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
428
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
429
+ layer_head_mask: tf.Tensor | None = None,
430
+ cross_attn_layer_head_mask: tf.Tensor | None = None,
431
+ past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
432
+ training: Optional[bool] = False,
433
+ ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
434
+ """
435
+ Args:
436
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
437
+ attention_mask (`tf.Tensor`): attention mask of size
438
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
439
+ encoder_hidden_states (`tf.Tensor`):
440
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
441
+ encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
442
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
443
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
444
+ `(decoder_attention_heads,)`
445
+ cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
446
+ `(decoder_attention_heads,)`
447
+ past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
448
+ """
449
+ residual = hidden_states
450
+
451
+ # Self Attention
452
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
453
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
454
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
455
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
456
+ hidden_states=hidden_states,
457
+ past_key_value=self_attn_past_key_value,
458
+ attention_mask=attention_mask,
459
+ layer_head_mask=layer_head_mask,
460
+ )
461
+ hidden_states = self.dropout(hidden_states, training=training)
462
+ hidden_states = residual + hidden_states
463
+ hidden_states = self.self_attn_layer_norm(hidden_states)
464
+
465
+ # Cross-Attention Block
466
+ cross_attn_present_key_value = None
467
+ cross_attn_weights = None
468
+ if encoder_hidden_states is not None:
469
+ residual = hidden_states
470
+
471
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
472
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
473
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
474
+ hidden_states=hidden_states,
475
+ key_value_states=encoder_hidden_states,
476
+ attention_mask=encoder_attention_mask,
477
+ layer_head_mask=cross_attn_layer_head_mask,
478
+ past_key_value=cross_attn_past_key_value,
479
+ )
480
+ hidden_states = self.dropout(hidden_states, training=training)
481
+ hidden_states = residual + hidden_states
482
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
483
+
484
+ # add cross-attn to positions 3,4 of present_key_value tuple
485
+ present_key_value = present_key_value + cross_attn_present_key_value
486
+
487
+ # Fully Connected
488
+ residual = hidden_states
489
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
490
+ hidden_states = self.activation_dropout(hidden_states, training=training)
491
+ hidden_states = self.fc2(hidden_states)
492
+ hidden_states = self.dropout(hidden_states, training=training)
493
+ hidden_states = residual + hidden_states
494
+ hidden_states = self.final_layer_norm(hidden_states)
495
+
496
+ return (
497
+ hidden_states,
498
+ self_attn_weights,
499
+ cross_attn_weights,
500
+ present_key_value,
501
+ )
502
+
503
+ def build(self, input_shape=None):
504
+ if self.built:
505
+ return
506
+ self.built = True
507
+ if getattr(self, "self_attn", None) is not None:
508
+ with tf.name_scope(self.self_attn.name):
509
+ self.self_attn.build(None)
510
+ if getattr(self, "self_attn_layer_norm", None) is not None:
511
+ with tf.name_scope(self.self_attn_layer_norm.name):
512
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
513
+ if getattr(self, "encoder_attn", None) is not None:
514
+ with tf.name_scope(self.encoder_attn.name):
515
+ self.encoder_attn.build(None)
516
+ if getattr(self, "encoder_attn_layer_norm", None) is not None:
517
+ with tf.name_scope(self.encoder_attn_layer_norm.name):
518
+ self.encoder_attn_layer_norm.build([None, None, self.embed_dim])
519
+ if getattr(self, "fc1", None) is not None:
520
+ with tf.name_scope(self.fc1.name):
521
+ self.fc1.build([None, None, self.embed_dim])
522
+ if getattr(self, "fc2", None) is not None:
523
+ with tf.name_scope(self.fc2.name):
524
+ self.fc2.build([None, None, self.config.decoder_ffn_dim])
525
+ if getattr(self, "final_layer_norm", None) is not None:
526
+ with tf.name_scope(self.final_layer_norm.name):
527
+ self.final_layer_norm.build([None, None, self.embed_dim])
528
+
529
+
530
+ class TFBartClassificationHead(keras.layers.Layer):
531
+ """Head for sentence-level classification tasks."""
532
+
533
+ def __init__(self, inner_dim: int, num_classes: int, pooler_dropout: float, name: str, **kwargs):
534
+ super().__init__(name=name, **kwargs)
535
+ self.dense = keras.layers.Dense(inner_dim, name="dense")
536
+ self.dropout = keras.layers.Dropout(pooler_dropout)
537
+ self.out_proj = keras.layers.Dense(num_classes, name="out_proj")
538
+ self.input_dim = inner_dim
539
+ self.inner_dim = inner_dim
540
+
541
+ def call(self, inputs):
542
+ hidden_states = self.dropout(inputs)
543
+ hidden_states = self.dense(hidden_states)
544
+ hidden_states = keras.activations.tanh(hidden_states)
545
+ hidden_states = self.dropout(hidden_states)
546
+ hidden_states = self.out_proj(hidden_states)
547
+ return hidden_states
548
+
549
+ def build(self, input_shape=None):
550
+ if self.built:
551
+ return
552
+ self.built = True
553
+ if getattr(self, "dense", None) is not None:
554
+ with tf.name_scope(self.dense.name):
555
+ self.dense.build([None, None, self.input_dim])
556
+ if getattr(self, "out_proj", None) is not None:
557
+ with tf.name_scope(self.out_proj.name):
558
+ self.out_proj.build([None, None, self.inner_dim])
559
+
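The head is fed the decoder hidden state at the final `eos` token of each sequence (that gathering happens later, in the sequence-classification model); a random tensor stands in for that sentence representation here:

```python
import tensorflow as tf

head = TFBartClassificationHead(inner_dim=16, num_classes=3, pooler_dropout=0.1, name="classification_head")
sentence_representation = tf.random.normal((4, 16))    # (batch, d_model) stand-in
logits = head(sentence_representation)
print(logits.shape)                                    # (4, 3)
```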
560
+
561
+ class TFBartPretrainedModel(TFPreTrainedModel):
562
+ config_class = BartConfig
563
+ base_model_prefix = "model"
564
+
565
+ @property
566
+ def dummy_inputs(self):
567
+ dummy_inputs = super().dummy_inputs
568
+ # Dummy inputs should not contain the default val of 1
569
+ # as this is the padding token and some assertions check it
570
+ dummy_inputs["input_ids"] = dummy_inputs["input_ids"] * 2
571
+ if "decoder_input_ids" in dummy_inputs:
572
+ dummy_inputs["decoder_input_ids"] = dummy_inputs["decoder_input_ids"] * 2
573
+ return dummy_inputs
574
+
575
+ def tf_to_pt_weight_rename(self, tf_weight):
576
+ if tf_weight == "model.shared.weight":
577
+ return tf_weight, "model.decoder.embed_tokens.weight"
578
+ else:
579
+ return (tf_weight,)
580
+
581
+
582
+ BART_START_DOCSTRING = r"""
583
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
584
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
585
+ etc.)
586
+
587
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
588
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
589
+ behavior.
590
+
591
+ <Tip>
592
+
593
+ TensorFlow models and layers in `transformers` accept two formats as input:
594
+
595
+ - having all inputs as keyword arguments (like PyTorch models), or
596
+ - having all inputs as a list, tuple or dict in the first positional argument.
597
+
598
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
599
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
600
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
601
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
602
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
603
+ positional argument:
604
+
605
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
606
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
607
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
608
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
609
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
610
+
611
+ Note that when creating models and layers with
612
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
613
+ about any of this, as you can just pass inputs like you would to any other Python function!
614
+
615
+ </Tip>
616
+
617
+ Args:
618
+ config ([`BartConfig`]): Model configuration class with all the parameters of the model.
619
+ Initializing with a config file does not load the weights associated with the model, only the
620
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
621
+ """
622
+
623
+
624
+ BART_GENERATION_EXAMPLE = r"""
625
+ Summarization example:
626
+
627
+ ```python
628
+ >>> from transformers import AutoTokenizer, TFBartForConditionalGeneration
629
+
630
+ >>> model = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large")
631
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")
632
+
633
+ >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
634
+ >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="tf")
635
+
636
+ >>> # Generate Summary
637
+ >>> summary_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=5)
638
+ >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))
639
+ ```
640
+
641
+ Mask filling example:
642
+
643
+ ```python
644
+ >>> from transformers import AutoTokenizer, TFBartForConditionalGeneration
645
+
646
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")
647
+ >>> TXT = "My friends are <mask> but they eat too many carbs."
648
+
649
+ >>> model = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large")
650
+ >>> input_ids = tokenizer([TXT], return_tensors="tf")["input_ids"]
651
+ >>> logits = model(input_ids).logits
652
+ >>> probs = tf.nn.softmax(logits[0])
653
+ >>> # probs[5] is associated with the mask token
654
+ ```
655
+ """
656
+
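A possible continuation of the mask-filling snippet above (not part of the original docstring) that decodes the top predictions at the `<mask>` position:

```python
>>> values, predictions = tf.math.top_k(probs[5], k=5)
>>> print(tokenizer.decode(predictions).split())
```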
657
+
658
+ BART_INPUTS_DOCSTRING = r"""
659
+ Args:
660
+ input_ids (`tf.Tensor` of shape `({0})`):
661
+ Indices of input sequence tokens in the vocabulary.
662
+
663
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
664
+ [`PreTrainedTokenizer.__call__`] for details.
665
+
666
+ [What are input IDs?](../glossary#input-ids)
667
+ attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
668
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
669
+
670
+ - 1 for tokens that are **not masked**,
671
+ - 0 for tokens that are **masked**.
672
+
673
+ [What are attention masks?](../glossary#attention-mask)
674
+ decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
675
+ Indices of decoder input sequence tokens in the vocabulary.
676
+
677
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
678
+ [`PreTrainedTokenizer.__call__`] for details.
679
+
680
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
681
+
682
+ Bart uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
683
+ is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
684
+
685
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
686
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
687
+ for denoising pre-training following the paper.
688
+ decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
689
+ Will be created by default, ignoring pad tokens in `decoder_input_ids`. Setting it manually is not recommended for most use cases.
690
+ decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
691
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
692
+ range `[0, config.max_position_embeddings - 1]`.
693
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
694
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
695
+
696
+ - 1 indicates the head is **not masked**,
697
+ - 0 indicates the head is **masked**.
698
+
699
+ decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
700
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
701
+
702
+ - 1 indicates the head is **not masked**,
703
+ - 0 indicates the head is **masked**.
704
+
705
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
706
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
707
+
708
+ - 1 indicates the head is **not masked**,
709
+ - 0 indicates the head is **masked**.
710
+
711
+ encoder_outputs (`tf.FloatTensor`, *optional*):
712
+ Sequence of hidden-states at the output of the last layer of the encoder, of shape
713
+ `(batch_size, sequence_length, hidden_size)`. Used in the cross-attention of the decoder.
714
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
715
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
716
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
717
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
718
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
719
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
720
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
721
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
722
+ than the model's internal embedding lookup matrix.
723
+ use_cache (`bool`, *optional*, defaults to `True`):
724
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
725
+ `past_key_values`). Set to `False` during training, `True` during generation
726
+ output_attentions (`bool`, *optional*):
727
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
728
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
729
+ config will be used instead.
730
+ output_hidden_states (`bool`, *optional*):
731
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
732
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
733
+ used instead.
734
+ return_dict (`bool`, *optional*):
735
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
736
+ eager mode, in graph mode the value will always be set to True.
737
+ training (`bool`, *optional*, defaults to `False`):
738
+ Whether or not to use the model in training mode (some modules like dropout modules have different
739
+ behaviors between training and evaluation).
740
+ """
741
+
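For orientation, a minimal call that exercises the arguments documented above: only `input_ids` and `attention_mask` are supplied, and `decoder_input_ids` is then derived by shifting `input_ids` to the right. `TFBartModel` is defined further below in this file; the checkpoint matches `_CHECKPOINT_FOR_DOC`.

```python
from transformers import AutoTokenizer, TFBartModel

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")
model = TFBartModel.from_pretrained("facebook/bart-large")

inputs = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="tf")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)   # (1, sequence_length, d_model)
```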
742
+
743
+ @keras_serializable
744
+ class TFBartEncoder(keras.layers.Layer):
745
+ config_class = BartConfig
746
+ """
747
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
748
+ [`TFBartEncoderLayer`].
749
+
750
+ Args:
751
+ config: BartConfig
752
+ """
753
+
754
+ def __init__(self, config: BartConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
755
+ super().__init__(**kwargs)
756
+ self.config = config
757
+ self.dropout = keras.layers.Dropout(config.dropout)
758
+ self.layerdrop = config.encoder_layerdrop
759
+ self.padding_idx = config.pad_token_id
760
+ self.max_source_positions = config.max_position_embeddings
761
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
762
+
763
+ self.embed_tokens = embed_tokens
764
+ self.embed_positions = TFBartLearnedPositionalEmbedding(
765
+ config.max_position_embeddings,
766
+ config.d_model,
767
+ name="embed_positions",
768
+ )
769
+ self.layers = [TFBartEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
770
+ self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
771
+ self.embed_dim = config.d_model
772
+
773
+ @unpack_inputs
774
+ def call(
775
+ self,
776
+ input_ids: TFModelInputType | None = None,
777
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
778
+ attention_mask: np.ndarray | tf.Tensor | None = None,
779
+ head_mask: np.ndarray | tf.Tensor | None = None,
780
+ output_attentions: Optional[bool] = None,
781
+ output_hidden_states: Optional[bool] = None,
782
+ return_dict: Optional[bool] = None,
783
+ training: Optional[bool] = False,
784
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
785
+ """
786
+ Args:
787
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
788
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
789
+ provide it.
790
+
791
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
792
+ [`PreTrainedTokenizer.__call__`] for details.
793
+
794
+ [What are input IDs?](../glossary#input-ids)
795
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
796
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
797
+
798
+ - 1 for tokens that are **not masked**,
799
+ - 0 for tokens that are **masked**.
800
+
801
+ [What are attention masks?](../glossary#attention-mask)
802
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
803
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
804
+
805
+ - 1 indicates the head is **not masked**,
806
+ - 0 indicates the head is **masked**.
807
+
808
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
809
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
810
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
811
+ than the model's internal embedding lookup matrix.
812
+ output_attentions (`bool`, *optional*):
813
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
814
+ returned tensors for more detail.
815
+ output_hidden_states (`bool`, *optional*):
816
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
817
+ for more detail.
818
+ return_dict (`bool`, *optional*):
819
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
820
+ """
821
+ if input_ids is not None and inputs_embeds is not None:
822
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
823
+ elif input_ids is not None:
824
+ input_shape = shape_list(input_ids)
825
+ elif inputs_embeds is not None:
826
+ input_shape = shape_list(inputs_embeds)[:-1]
827
+ else:
828
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
829
+
830
+ if inputs_embeds is None:
831
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
832
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
833
+
834
+ embed_pos = self.embed_positions(input_shape)
835
+ hidden_states = inputs_embeds + embed_pos
836
+ hidden_states = self.layernorm_embedding(hidden_states)
837
+ hidden_states = self.dropout(hidden_states, training=training)
838
+
839
+ # check attention mask and invert
840
+ if attention_mask is not None:
841
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
842
+ attention_mask = _expand_mask(attention_mask)
843
+ else:
844
+ attention_mask = None
845
+
846
+ encoder_states = () if output_hidden_states else None
847
+ all_attentions = () if output_attentions else None
848
+
849
+ # check if head_mask has a correct number of layers specified if desired
850
+ if head_mask is not None:
851
+ tf.debugging.assert_equal(
852
+ shape_list(head_mask)[0],
853
+ len(self.layers),
854
+ message=(
855
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
856
+ f" {shape_list(head_mask)[0]}."
857
+ ),
858
+ )
859
+
860
+ # encoder layers
861
+ for idx, encoder_layer in enumerate(self.layers):
862
+ if output_hidden_states:
863
+ encoder_states = encoder_states + (hidden_states,)
864
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
865
+ dropout_probability = random.uniform(0, 1)
866
+ if training and (dropout_probability < self.layerdrop): # skip the layer
867
+ continue
868
+
869
+ hidden_states, attn = encoder_layer(
870
+ hidden_states,
871
+ attention_mask,
872
+ head_mask[idx] if head_mask is not None else None,
873
+ )
874
+
875
+ if output_attentions:
876
+ all_attentions += (attn,)
877
+
878
+ if output_hidden_states:
879
+ encoder_states = encoder_states + (hidden_states,)
880
+
881
+ if not return_dict:
882
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
883
+ return TFBaseModelOutput(
884
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
885
+ )
886
+
887
+ def build(self, input_shape=None):
888
+ if self.built:
889
+ return
890
+ self.built = True
891
+ if getattr(self, "embed_positions", None) is not None:
892
+ with tf.name_scope(self.embed_positions.name):
893
+ self.embed_positions.build(None)
894
+ if getattr(self, "layernorm_embedding", None) is not None:
895
+ with tf.name_scope(self.layernorm_embedding.name):
896
+ self.layernorm_embedding.build([None, None, self.embed_dim])
897
+ if getattr(self, "layers", None) is not None:
898
+ for layer in self.layers:
899
+ with tf.name_scope(layer.name):
900
+ layer.build(None)
901
+
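The encoder stack can also be exercised on its own by feeding `inputs_embeds` directly, which sidesteps the shared token embedding that the main layer normally passes in. A sketch with a tiny config and random values, purely illustrative:

```python
import tensorflow as tf
from transformers import BartConfig

tiny_config = BartConfig(d_model=16, encoder_layers=2, encoder_attention_heads=2,
                         encoder_ffn_dim=32, max_position_embeddings=64)
encoder = TFBartEncoder(tiny_config, embed_tokens=None, name="encoder")

embeds = tf.random.normal((2, 7, tiny_config.d_model))
enc_out = encoder(input_ids=None, inputs_embeds=embeds, attention_mask=tf.ones((2, 7)))
print(enc_out.last_hidden_state.shape)   # (2, 7, 16)
```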
902
+
903
+ @keras_serializable
904
+ class TFBartDecoder(keras.layers.Layer):
905
+ config_class = BartConfig
906
+ """
907
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFBartDecoderLayer`]
908
+
909
+ Args:
910
+ config: BartConfig
911
+ embed_tokens: output embedding
912
+ """
913
+
914
+ def __init__(self, config: BartConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
915
+ super().__init__(**kwargs)
916
+ self.config = config
917
+ self.padding_idx = config.pad_token_id
918
+ self.embed_tokens = embed_tokens
919
+ self.layerdrop = config.decoder_layerdrop
920
+ self.embed_positions = TFBartLearnedPositionalEmbedding(
921
+ config.max_position_embeddings,
922
+ config.d_model,
923
+ name="embed_positions",
924
+ )
925
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
926
+ self.layers = [TFBartDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
927
+ self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
928
+
929
+ self.dropout = keras.layers.Dropout(config.dropout)
930
+
931
+ @unpack_inputs
932
+ def call(
933
+ self,
934
+ input_ids: TFModelInputType | None = None,
935
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
936
+ attention_mask: np.ndarray | tf.Tensor | None = None,
937
+ position_ids: np.ndarray | tf.Tensor | None = None,
938
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
939
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
940
+ head_mask: np.ndarray | tf.Tensor | None = None,
941
+ cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,
942
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
943
+ use_cache: Optional[bool] = None,
944
+ output_attentions: Optional[bool] = None,
945
+ output_hidden_states: Optional[bool] = None,
946
+ return_dict: Optional[bool] = None,
947
+ training: Optional[bool] = False,
948
+ ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
949
+ r"""
950
+ Args:
951
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
952
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
953
+ provide it.
954
+
955
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
956
+ [`PreTrainedTokenizer.__call__`] for details.
957
+
958
+ [What are input IDs?](../glossary#input-ids)
959
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
960
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
961
+
962
+ - 1 for tokens that are **not masked**,
963
+ - 0 for tokens that are **masked**.
964
+
965
+ [What are attention masks?](../glossary#attention-mask)
966
+ position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
967
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
968
+ range `[0, config.max_position_embeddings - 1]`.
969
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
970
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
971
+ of the decoder.
972
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
973
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
974
+ selected in `[0, 1]`:
975
+
976
+ - 1 for tokens that are **not masked**,
977
+ - 0 for tokens that are **masked**.
978
+
979
+ [What are attention masks?](../glossary#attention-mask)
980
+ head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
981
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
982
+
983
+ - 1 indicates the head is **not masked**,
984
+ - 0 indicates the head is **masked**.
985
+
986
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
987
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
988
+
989
+ - 1 indicates the head is **not masked**,
990
+ - 0 indicates the head is **masked**.
991
+
992
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
993
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
994
+ decoding.
995
+
996
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
997
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
998
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
999
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1000
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
1001
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
1002
+ than the model's internal embedding lookup matrix.
1003
+ output_attentions (`bool`, *optional*):
1004
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1005
+ returned tensors for more detail.
1006
+ output_hidden_states (`bool`, *optional*):
1007
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
1008
+ for more detail.
1009
+ return_dict (`bool`, *optional*):
1010
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1011
+ """
1012
+
1013
+ if input_ids is not None and inputs_embeds is not None:
1014
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
1015
+ elif input_ids is not None:
1016
+ input_shape = shape_list(input_ids)
1017
+ elif inputs_embeds is not None:
1018
+ input_shape = shape_list(inputs_embeds)[:-1]
1019
+ else:
1020
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
1021
+
1022
+ past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0
1023
+
1024
+ # embed positions
1025
+ if position_ids is None:
1026
+ positions = self.embed_positions(input_shape, past_key_values_length)
1027
+ else:
1028
+ positions = self.embed_positions(input_shape, position_ids=position_ids)
1029
+
1030
+ if inputs_embeds is None:
1031
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
1032
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
1033
+
1034
+ hidden_states = inputs_embeds
1035
+
1036
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1037
+ if input_shape[-1] > 1:
1038
+ combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
1039
+ else:
1040
+ combined_attention_mask = _expand_mask(
1041
+ tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
1042
+ )
1043
+
1044
+ if attention_mask is not None:
1045
+ combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])
1046
+
1047
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
1048
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1049
+ encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])
1050
+
1051
+ hidden_states = self.layernorm_embedding(hidden_states + positions)
1052
+ hidden_states = self.dropout(hidden_states, training=training)
1053
+
1054
+ # decoder layers
1055
+ all_hidden_states = () if output_hidden_states else None
1056
+ all_self_attns = () if output_attentions else None
1057
+ all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None
1058
+ present_key_values = () if use_cache else None
1059
+
1060
+ # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
1061
+ for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]:
1062
+ if attn_mask is not None:
1063
+ tf.debugging.assert_equal(
1064
+ shape_list(attn_mask)[0],
1065
+ len(self.layers),
1066
+ message=(
1067
+ f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
1068
+ f" {shape_list(attn_mask)[0]}."
1069
+ ),
1070
+ )
1071
+
1072
+ for idx, decoder_layer in enumerate(self.layers):
1073
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1074
+ if output_hidden_states:
1075
+ all_hidden_states += (hidden_states,)
1076
+
1077
+ dropout_probability = random.uniform(0, 1)
1078
+
1079
+ if training and (dropout_probability < self.layerdrop):
1080
+ continue
1081
+
1082
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
1083
+
1084
+ hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
1085
+ hidden_states,
1086
+ attention_mask=combined_attention_mask,
1087
+ encoder_hidden_states=encoder_hidden_states,
1088
+ encoder_attention_mask=encoder_attention_mask,
1089
+ layer_head_mask=head_mask[idx] if head_mask is not None else None,
1090
+ cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
1091
+ past_key_value=past_key_value,
1092
+ )
1093
+
1094
+ if use_cache:
1095
+ present_key_values += (present_key_value,)
1096
+
1097
+ if output_attentions:
1098
+ all_self_attns += (layer_self_attn,)
1099
+
1100
+ if encoder_hidden_states is not None:
1101
+ all_cross_attns += (layer_cross_attn,)
1102
+
1103
+ if output_hidden_states:
1104
+ all_hidden_states += (hidden_states,)
1105
+
1106
+ if not return_dict:
1107
+ return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns
1108
+ else:
1109
+ return TFBaseModelOutputWithPastAndCrossAttentions(
1110
+ last_hidden_state=hidden_states,
1111
+ past_key_values=present_key_values,
1112
+ hidden_states=all_hidden_states,
1113
+ attentions=all_self_attns,
1114
+ cross_attentions=all_cross_attns,
1115
+ )
1116
+
1117
+ def build(self, input_shape=None):
1118
+ if self.built:
1119
+ return
1120
+ self.built = True
1121
+ if getattr(self, "embed_positions", None) is not None:
1122
+ with tf.name_scope(self.embed_positions.name):
1123
+ self.embed_positions.build(None)
1124
+ if getattr(self, "layernorm_embedding", None) is not None:
1125
+ with tf.name_scope(self.layernorm_embedding.name):
1126
+ self.layernorm_embedding.build([None, None, self.config.d_model])
1127
+ if getattr(self, "layers", None) is not None:
1128
+ for layer in self.layers:
1129
+ with tf.name_scope(layer.name):
1130
+ layer.build(None)
1131
+
1132
+
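# Illustrative sketch (not part of this file): how a causal mask and a padding mask can be combined
# into the additive [bsz, 1, tgt_len, src_len] mask used by the decoder above. The library helpers
# `_make_causal_mask`/`_expand_mask` are not reproduced here; this toy version only mirrors the idea,
# and LARGE_NEGATIVE is an assumed "effectively minus infinity" constant.
import tensorflow as tf

LARGE_NEGATIVE = -1e9

def toy_causal_mask(tgt_len):
    allowed = tf.linalg.band_part(tf.ones((tgt_len, tgt_len)), -1, 0)  # lower triangle = may attend
    return (1.0 - allowed) * LARGE_NEGATIVE                            # 0 where allowed, -1e9 where masked

def toy_padding_mask(attention_mask, tgt_len):
    keep = tf.cast(attention_mask[:, None, None, :], tf.float32)       # [bsz, src_len] -> [bsz, 1, 1, src_len]
    return (1.0 - tf.tile(keep, [1, 1, tgt_len, 1])) * LARGE_NEGATIVE

attn = tf.constant([[1, 1, 1, 0]])                                     # one batch element, last token padded
combined = toy_causal_mask(4)[None, None, :, :] + toy_padding_mask(attn, 4)  # [1, 1, 4, 4] additive mask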
1133
+ @keras_serializable
1134
+ class TFBartMainLayer(keras.layers.Layer):
1135
+ config_class = BartConfig
1136
+
1137
+ def __init__(self, config: BartConfig, load_weight_prefix=None, **kwargs):
1138
+ super().__init__(**kwargs)
1139
+ self.config = config
1140
+ self.shared = keras.layers.Embedding(
1141
+ input_dim=config.vocab_size,
1142
+ output_dim=config.d_model,
1143
+ embeddings_initializer=keras.initializers.TruncatedNormal(stddev=self.config.init_std),
1144
+ name="model.shared",
1145
+ )
1146
+ # Additional attribute to specify the expected name scope of the layer (for loading/storing weights)
1147
+ self.shared.load_weight_prefix = "model.shared" if load_weight_prefix is None else load_weight_prefix
1148
+
1149
+ self.encoder = TFBartEncoder(config, self.shared, name="encoder")
1150
+ self.decoder = TFBartDecoder(config, self.shared, name="decoder")
1151
+
1152
+ def get_input_embeddings(self):
1153
+ return self.shared
1154
+
1155
+ def set_input_embeddings(self, new_embeddings):
1156
+ self.shared = new_embeddings
1157
+ self.encoder.embed_tokens = self.shared
1158
+ self.decoder.embed_tokens = self.shared
1159
+
1160
+ @unpack_inputs
1161
+ def call(
1162
+ self,
1163
+ input_ids: TFModelInputType | None = None,
1164
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1165
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
1166
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1167
+ decoder_position_ids: np.ndarray | tf.Tensor | None = None,
1168
+ head_mask: np.ndarray | tf.Tensor | None = None,
1169
+ decoder_head_mask: np.ndarray | tf.Tensor | None = None,
1170
+ cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,
1171
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
1172
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1173
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1174
+ decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
1175
+ use_cache: Optional[bool] = None,
1176
+ output_attentions: Optional[bool] = None,
1177
+ output_hidden_states: Optional[bool] = None,
1178
+ return_dict: Optional[bool] = None,
1179
+ training: Optional[bool] = False,
1180
+ **kwargs,
1181
+ ) -> Union[TFSeq2SeqModelOutput, Tuple[tf.Tensor]]:
1182
+ # different to other models, Bart automatically creates decoder_input_ids from
1183
+ # input_ids if no decoder_input_ids are provided
1184
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1185
+ if input_ids is None:
1186
+ raise ValueError(
1187
+ "If no `decoder_input_ids` or `decoder_inputs_embeds` are "
1188
+ "passed, `input_ids` cannot be `None`. Please pass either "
1189
+ "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`."
1190
+ )
1191
+
1192
+ decoder_input_ids = shift_tokens_right(
1193
+ input_ids, self.config.pad_token_id, self.config.decoder_start_token_id
1194
+ )
1195
+
1196
+ if encoder_outputs is None:
1197
+ encoder_outputs = self.encoder(
1198
+ input_ids=input_ids,
1199
+ attention_mask=attention_mask,
1200
+ head_mask=head_mask,
1201
+ inputs_embeds=inputs_embeds,
1202
+ output_attentions=output_attentions,
1203
+ output_hidden_states=output_hidden_states,
1204
+ return_dict=return_dict,
1205
+ training=training,
1206
+ )
1207
+ # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
1208
+ elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput):
1209
+ encoder_outputs = TFBaseModelOutput(
1210
+ last_hidden_state=encoder_outputs[0],
1211
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1212
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1213
+ )
1214
+ # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
1215
+ elif not return_dict and not isinstance(encoder_outputs, tuple):
1216
+ encoder_outputs = encoder_outputs.to_tuple()
1217
+
1218
+ decoder_outputs = self.decoder(
1219
+ decoder_input_ids,
1220
+ attention_mask=decoder_attention_mask,
1221
+ position_ids=decoder_position_ids,
1222
+ encoder_hidden_states=encoder_outputs[0],
1223
+ encoder_attention_mask=attention_mask,
1224
+ head_mask=decoder_head_mask,
1225
+ cross_attn_head_mask=cross_attn_head_mask,
1226
+ past_key_values=past_key_values,
1227
+ inputs_embeds=decoder_inputs_embeds,
1228
+ use_cache=use_cache,
1229
+ output_attentions=output_attentions,
1230
+ output_hidden_states=output_hidden_states,
1231
+ return_dict=return_dict,
1232
+ training=training,
1233
+ )
1234
+
1235
+ if not return_dict:
1236
+ return decoder_outputs + encoder_outputs
1237
+
1238
+ return TFSeq2SeqModelOutput(
1239
+ last_hidden_state=decoder_outputs.last_hidden_state,
1240
+ past_key_values=decoder_outputs.past_key_values,
1241
+ decoder_hidden_states=decoder_outputs.hidden_states,
1242
+ decoder_attentions=decoder_outputs.attentions,
1243
+ cross_attentions=decoder_outputs.cross_attentions,
1244
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1245
+ encoder_hidden_states=encoder_outputs.hidden_states,
1246
+ encoder_attentions=encoder_outputs.attentions,
1247
+ )
1248
+
1249
+ def build(self, input_shape=None):
1250
+ if self.built:
1251
+ return
1252
+ self.built = True
1253
+ # The shared/tied weights expect to be in the model base namespace
1254
+ # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the root namespace rather than
1255
+ # the current one.
1256
+ with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"):
1257
+ self.shared.build(None)
1258
+ if getattr(self, "encoder", None) is not None:
1259
+ with tf.name_scope(self.encoder.name):
1260
+ self.encoder.build(None)
1261
+ if getattr(self, "decoder", None) is not None:
1262
+ with tf.name_scope(self.decoder.name):
1263
+ self.decoder.build(None)
1264
+
1265
+
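# Illustrative sketch (not part of this file): when `decoder_input_ids` are not passed,
# `TFBartMainLayer.call` above derives them from `input_ids` via `shift_tokens_right`. The toy
# function below only mirrors that behaviour (prepend the decoder start token, drop the last
# position, replace any -100 placeholders with the pad token); it is not the library helper itself.
import tensorflow as tf

def toy_shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    start = tf.fill([tf.shape(input_ids)[0], 1], tf.cast(decoder_start_token_id, input_ids.dtype))
    shifted = tf.concat([start, input_ids[:, :-1]], axis=-1)
    return tf.where(shifted == -100, tf.cast(pad_token_id, shifted.dtype), shifted)

# e.g. [[5, 6, 7, 2]] with decoder_start_token_id=2 and pad_token_id=1 -> [[2, 5, 6, 7]]
print(toy_shift_tokens_right(tf.constant([[5, 6, 7, 2]]), pad_token_id=1, decoder_start_token_id=2))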
1266
+ @add_start_docstrings(
1267
+ "The bare BART Model outputting raw hidden-states without any specific head on top.",
1268
+ BART_START_DOCSTRING,
1269
+ )
1270
+ class TFBartModel(TFBartPretrainedModel):
1271
+ _requires_load_weight_prefix = True
1272
+
1273
+ def __init__(self, config: BartConfig, load_weight_prefix=None, *inputs, **kwargs):
1274
+ super().__init__(config, *inputs, **kwargs)
1275
+
1276
+ self.model = TFBartMainLayer(config, load_weight_prefix=load_weight_prefix, name="model")
1277
+
1278
+ def get_encoder(self):
1279
+ return self.model.encoder
1280
+
1281
+ def get_decoder(self):
1282
+ return self.model.decoder
1283
+
1284
+ @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1285
+ @add_code_sample_docstrings(
1286
+ checkpoint=_CHECKPOINT_FOR_DOC,
1287
+ output_type=TFSeq2SeqModelOutput,
1288
+ config_class=_CONFIG_FOR_DOC,
1289
+ )
1290
+ @unpack_inputs
1291
+ def call(
1292
+ self,
1293
+ input_ids: TFModelInputType | None = None,
1294
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1295
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
1296
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1297
+ decoder_position_ids: np.ndarray | tf.Tensor | None = None,
1298
+ head_mask: np.ndarray | tf.Tensor | None = None,
1299
+ decoder_head_mask: np.ndarray | tf.Tensor | None = None,
1300
+ cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,
1301
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
1302
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1303
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1304
+ decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
1305
+ use_cache: Optional[bool] = None,
1306
+ output_attentions: Optional[bool] = None,
1307
+ output_hidden_states: Optional[bool] = None,
1308
+ return_dict: Optional[bool] = None,
1309
+ training: Optional[bool] = False,
1310
+ **kwargs,
1311
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
1312
+ outputs = self.model(
1313
+ input_ids=input_ids,
1314
+ attention_mask=attention_mask,
1315
+ decoder_input_ids=decoder_input_ids,
1316
+ decoder_attention_mask=decoder_attention_mask,
1317
+ decoder_position_ids=decoder_position_ids,
1318
+ head_mask=head_mask,
1319
+ decoder_head_mask=decoder_head_mask,
1320
+ cross_attn_head_mask=cross_attn_head_mask,
1321
+ encoder_outputs=encoder_outputs,
1322
+ past_key_values=past_key_values,
1323
+ inputs_embeds=inputs_embeds,
1324
+ decoder_inputs_embeds=decoder_inputs_embeds,
1325
+ use_cache=use_cache,
1326
+ output_attentions=output_attentions,
1327
+ output_hidden_states=output_hidden_states,
1328
+ return_dict=return_dict,
1329
+ training=training,
1330
+ )
1331
+
1332
+ return outputs
1333
+
1334
+ def serving_output(self, output):
1335
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
1336
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
1337
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
1338
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
1339
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
1340
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
1341
+
1342
+ return TFSeq2SeqModelOutput(
1343
+ last_hidden_state=output.last_hidden_state,
1344
+ past_key_values=pkv,
1345
+ decoder_hidden_states=dec_hs,
1346
+ decoder_attentions=dec_attns,
1347
+ cross_attentions=cross_attns,
1348
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
1349
+ encoder_hidden_states=enc_hs,
1350
+ encoder_attentions=enc_attns,
1351
+ )
1352
+
1353
+ def build(self, input_shape=None):
1354
+ if self.built:
1355
+ return
1356
+ self.built = True
1357
+ if getattr(self, "model", None) is not None:
1358
+ with tf.name_scope(self.model.name):
1359
+ self.model.build(None)
1360
+
1361
+
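# A minimal usage sketch for the class above (assumes TensorFlow plus the public
# "facebook/bart-base" checkpoint; illustrative only, not part of this file):
from transformers import AutoTokenizer, TFBartModel

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
model = TFBartModel.from_pretrained("facebook/bart-base")

inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
outputs = model(**inputs)                      # decoder_input_ids are derived from input_ids here
print(outputs.last_hidden_state.shape)         # (batch_size, sequence_length, d_model)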
1362
+ class BiasLayer(keras.layers.Layer):
1363
+ """
1364
+ Bias as a layer. It is used for serialization purposes: `keras.Model.save_weights` stores on a per-layer basis,
1365
+ so all weights have to be registered in a layer.
1366
+ """
1367
+
1368
+ def __init__(self, shape, initializer, trainable, name, **kwargs):
1369
+ super().__init__(name=name, **kwargs)
1370
+ # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of
1371
+ # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see:
1372
+ # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214
1373
+ self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable)
1374
+
1375
+ def call(self, x):
1376
+ return x + self.bias
1377
+
1378
+
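# Illustrative sketch (not part of this file): `BiasLayer` above simply adds its registered bias
# variable to whatever tensor it is called on; wrapping the bias in a layer is what lets
# `keras.Model.save_weights` serialize it. Toy shapes below are assumptions for the example only.
import tensorflow as tf

bias = BiasLayer(name="final_logits_bias", shape=[1, 8], initializer="zeros", trainable=False)
logits = tf.zeros((2, 5, 8))        # (batch_size, seq_len, toy_vocab_size)
print(bias(logits).shape)           # (2, 5, 8) -- the [1, 8] bias broadcasts over batch and sequence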
1379
+ @add_start_docstrings(
1380
+ "The BART Model with a language modeling head. Can be used for summarization.",
1381
+ BART_START_DOCSTRING,
1382
+ )
1383
+ class TFBartForConditionalGeneration(TFBartPretrainedModel, TFCausalLanguageModelingLoss):
1384
+ _keys_to_ignore_on_load_missing = [r"final_logits_bias"]
1385
+ _requires_load_weight_prefix = True
1386
+
1387
+ def __init__(self, config, load_weight_prefix=None, *inputs, **kwargs):
1388
+ super().__init__(config, *inputs, **kwargs)
1389
+ self.model = TFBartMainLayer(config, load_weight_prefix=load_weight_prefix, name="model")
1390
+ self.use_cache = config.use_cache
1391
+ # final_logits_bias is registered as a buffer in pytorch, so not trainable for the sake of consistency.
1392
+ self.bias_layer = BiasLayer(
1393
+ name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
1394
+ )
1395
+
1396
+ def get_decoder(self):
1397
+ return self.model.decoder
1398
+
1399
+ def get_encoder(self):
1400
+ return self.model.encoder
1401
+
1402
+ def get_output_embeddings(self):
1403
+ return self.get_input_embeddings()
1404
+
1405
+ def set_output_embeddings(self, value):
1406
+ self.set_input_embeddings(value)
1407
+
1408
+ def get_bias(self):
1409
+ return {"final_logits_bias": self.bias_layer.bias}
1410
+
1411
+ def set_bias(self, value):
1412
+ # Replaces the existing layers containing bias for correct (de)serialization.
1413
+ vocab_size = value["final_logits_bias"].shape[-1]
1414
+ self.bias_layer = BiasLayer(
1415
+ name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False
1416
+ )
1417
+ self.bias_layer.bias.assign(value["final_logits_bias"])
1418
+
1419
+ @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)
1420
+ @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
1421
+ @add_end_docstrings(BART_GENERATION_EXAMPLE)
1422
+ @unpack_inputs
1423
+ def call(
1424
+ self,
1425
+ input_ids: TFModelInputType | None = None,
1426
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1427
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
1428
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1429
+ decoder_position_ids: np.ndarray | tf.Tensor | None = None,
1430
+ head_mask: np.ndarray | tf.Tensor | None = None,
1431
+ decoder_head_mask: np.ndarray | tf.Tensor | None = None,
1432
+ cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,
1433
+ encoder_outputs: Optional[TFBaseModelOutput] = None,
1434
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1435
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1436
+ decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
1437
+ use_cache: Optional[bool] = None,
1438
+ output_attentions: Optional[bool] = None,
1439
+ output_hidden_states: Optional[bool] = None,
1440
+ return_dict: Optional[bool] = None,
1441
+ labels: tf.Tensor | None = None,
1442
+ training: Optional[bool] = False,
1443
+ ) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]:
1444
+ r"""
1445
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1446
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1447
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1448
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1449
+
1450
+ Returns:
1451
+
1452
+ """
1453
+
1454
+ if labels is not None:
1455
+ labels = tf.where(
1456
+ labels == self.config.pad_token_id,
1457
+ tf.cast(tf.fill(shape_list(labels), -100), labels.dtype),
1458
+ labels,
1459
+ )
1460
+ use_cache = False
1461
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1462
+ decoder_input_ids = shift_tokens_right(
1463
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
1464
+ )
1465
+
1466
+ outputs = self.model(
1467
+ input_ids,
1468
+ attention_mask=attention_mask,
1469
+ decoder_input_ids=decoder_input_ids,
1470
+ encoder_outputs=encoder_outputs,
1471
+ decoder_attention_mask=decoder_attention_mask,
1472
+ decoder_position_ids=decoder_position_ids,
1473
+ head_mask=head_mask,
1474
+ decoder_head_mask=decoder_head_mask,
1475
+ cross_attn_head_mask=cross_attn_head_mask,
1476
+ past_key_values=past_key_values,
1477
+ inputs_embeds=inputs_embeds,
1478
+ decoder_inputs_embeds=decoder_inputs_embeds,
1479
+ use_cache=use_cache,
1480
+ output_attentions=output_attentions,
1481
+ output_hidden_states=output_hidden_states,
1482
+ return_dict=return_dict,
1483
+ training=training,
1484
+ )
1485
+ lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)
1486
+ lm_logits = self.bias_layer(lm_logits)
1487
+ masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
1488
+
1489
+ if not return_dict:
1490
+ output = (lm_logits,) + outputs[1:]
1491
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1492
+ return TFSeq2SeqLMOutput(
1493
+ loss=masked_lm_loss,
1494
+ logits=lm_logits,
1495
+ past_key_values=outputs.past_key_values,  # index 1 of decoder outputs
1496
+ decoder_hidden_states=outputs.decoder_hidden_states,  # index 2 of decoder outputs
1497
+ decoder_attentions=outputs.decoder_attentions,  # index 3 of decoder outputs
1498
+ cross_attentions=outputs.cross_attentions,  # index 4 of decoder outputs
1499
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,  # index 0 of encoder outputs
1500
+ encoder_hidden_states=outputs.encoder_hidden_states,  # index 1 of encoder outputs
1501
+ encoder_attentions=outputs.encoder_attentions,  # index 2 of encoder outputs
1502
+ )
1503
+
1504
+ def serving_output(self, output):
1505
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
1506
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
1507
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
1508
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
1509
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
1510
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
1511
+
1512
+ return TFSeq2SeqLMOutput(
1513
+ logits=output.logits,
1514
+ past_key_values=pkv,
1515
+ decoder_hidden_states=dec_hs,
1516
+ decoder_attentions=dec_attns,
1517
+ cross_attentions=cross_attns,
1518
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
1519
+ encoder_hidden_states=enc_hs,
1520
+ encoder_attentions=enc_attns,
1521
+ )
1522
+
1523
+ def prepare_inputs_for_generation(
1524
+ self,
1525
+ decoder_input_ids,
1526
+ past_key_values=None,
1527
+ attention_mask=None,
1528
+ decoder_attention_mask=None,
1529
+ head_mask=None,
1530
+ decoder_head_mask=None,
1531
+ cross_attn_head_mask=None,
1532
+ use_cache=None,
1533
+ encoder_outputs=None,
1534
+ **kwargs,
1535
+ ):
1536
+ # cut decoder_input_ids if past_key_values is used
1537
+ if past_key_values is not None:
1538
+ decoder_input_ids = decoder_input_ids[:, -1:]
1539
+
1540
+ if decoder_attention_mask is not None: # xla
1541
+ decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:]
1542
+ elif past_key_values is not None: # no xla + past_key_values
1543
+ decoder_position_ids = past_key_values[0][0].shape[2]
1544
+ else: # no xla + no past_key_values
1545
+ decoder_position_ids = tf.range(decoder_input_ids.shape[1])
1546
+
1547
+ return {
1548
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
1549
+ "encoder_outputs": encoder_outputs,
1550
+ "past_key_values": past_key_values,
1551
+ "decoder_input_ids": decoder_input_ids,
1552
+ "attention_mask": attention_mask,
1553
+ "decoder_attention_mask": decoder_attention_mask,
1554
+ "decoder_position_ids": decoder_position_ids,
1555
+ "head_mask": head_mask,
1556
+ "decoder_head_mask": decoder_head_mask,
1557
+ "cross_attn_head_mask": cross_attn_head_mask,
1558
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
1559
+ }
1560
+
1561
+ def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):
1562
+ return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
1563
+
1564
+ def build(self, input_shape=None):
1565
+ if self.built:
1566
+ return
1567
+ self.built = True
1568
+ if getattr(self, "model", None) is not None:
1569
+ with tf.name_scope(self.model.name):
1570
+ self.model.build(None)
1571
+ if getattr(self, "bias_layer", None) is not None:
1572
+ with tf.name_scope(self.bias_layer.name):
1573
+ self.bias_layer.build(None)
1574
+
1575
+
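# A minimal summarization sketch for the class above (assumes the public "facebook/bart-large-cnn"
# checkpoint; illustrative only, not part of this file):
from transformers import AutoTokenizer, TFBartForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
model = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")

article = "PG&E stated it scheduled the blackouts in response to forecasts for high winds."
inputs = tokenizer(article, return_tensors="tf")
summary_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=32)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])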
1576
+ @add_start_docstrings(
1577
+ """
1578
+ Bart model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for GLUE
1579
+ tasks.
1580
+ """,
1581
+ BART_START_DOCSTRING,
1582
+ )
1583
+ class TFBartForSequenceClassification(TFBartPretrainedModel, TFSequenceClassificationLoss):
1584
+ def __init__(self, config: BartConfig, load_weight_prefix=None, *inputs, **kwargs):
1585
+ super().__init__(config, *inputs, **kwargs)
1586
+ self.model = TFBartMainLayer(config, load_weight_prefix=load_weight_prefix, name="model")
1587
+ self.classification_head = TFBartClassificationHead(
1588
+ config.d_model, config.num_labels, config.classifier_dropout, name="classification_head"
1589
+ )
1590
+
1591
+ @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)
1592
+ @replace_return_docstrings(output_type=TFSeq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
1593
+ @unpack_inputs
1594
+ def call(
1595
+ self,
1596
+ input_ids: TFModelInputType | None = None,
1597
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1598
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
1599
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1600
+ decoder_position_ids: np.ndarray | tf.Tensor | None = None,
1601
+ head_mask: np.ndarray | tf.Tensor | None = None,
1602
+ decoder_head_mask: np.ndarray | tf.Tensor | None = None,
1603
+ cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,
1604
+ encoder_outputs: Optional[TFBaseModelOutput] = None,
1605
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1606
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1607
+ decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
1608
+ use_cache: Optional[bool] = None,
1609
+ output_attentions: Optional[bool] = None,
1610
+ output_hidden_states: Optional[bool] = None,
1611
+ return_dict: Optional[bool] = None,
1612
+ labels: tf.Tensor | None = None,
1613
+ training: Optional[bool] = False,
1614
+ ) -> Union[TFSeq2SeqSequenceClassifierOutput, Tuple[tf.Tensor]]:
1615
+ r"""
1616
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1617
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1618
+ config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1619
+
1620
+ Returns:
1621
+ """
1622
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1623
+ if labels is not None:
1624
+ use_cache = False
1625
+
1626
+ if input_ids is None and inputs_embeds is not None:
1627
+ raise NotImplementedError(
1628
+ f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
1629
+ )
1630
+
1631
+ outputs = self.model(
1632
+ input_ids=input_ids,
1633
+ attention_mask=attention_mask,
1634
+ decoder_input_ids=decoder_input_ids,
1635
+ decoder_attention_mask=decoder_attention_mask,
1636
+ decoder_position_ids=decoder_position_ids,
1637
+ head_mask=head_mask,
1638
+ decoder_head_mask=decoder_head_mask,
1639
+ cross_attn_head_mask=cross_attn_head_mask,
1640
+ encoder_outputs=encoder_outputs,
1641
+ past_key_values=past_key_values,
1642
+ inputs_embeds=inputs_embeds,
1643
+ decoder_inputs_embeds=decoder_inputs_embeds,
1644
+ use_cache=use_cache,
1645
+ output_attentions=output_attentions,
1646
+ output_hidden_states=output_hidden_states,
1647
+ return_dict=return_dict,
1648
+ training=training,
1649
+ )
1650
+
1651
+ last_hidden_state = outputs[0]
1652
+ eos_mask = tf.equal(input_ids, self.config.eos_token_id)
1653
+ # Gather the True entries of `eos_mask` into a (batch_size, -1) tensor; the reshape and the assert
1654
+ # below require every example to contain the same number of <eos> tokens.
1655
+ self_masked = tf.reshape(tf.boolean_mask(eos_mask, eos_mask), (tf.shape(input_ids)[0], -1))
1656
+ tf.Assert(tf.reduce_all(self_masked[:, -1]), ["All examples must have the same number of <eos> tokens."])
1657
+
1658
+ masked = tf.reshape(
1659
+ tf.boolean_mask(last_hidden_state, eos_mask),
1660
+ (tf.shape(input_ids)[0], tf.shape(self_masked)[1], tf.shape(last_hidden_state)[-1]),
1661
+ )
1662
+
1663
+ sentence_representation = masked[:, -1, :]
1664
+ logits = self.classification_head(sentence_representation)
1665
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1666
+
1667
+ if not return_dict:
1668
+ output = (logits,) + outputs[1:]
1669
+ return ((loss,) + output) if loss is not None else output
1670
+
1671
+ return TFSeq2SeqSequenceClassifierOutput(
1672
+ loss=loss,
1673
+ logits=logits,
1674
+ past_key_values=outputs.past_key_values,
1675
+ decoder_hidden_states=outputs.decoder_hidden_states,
1676
+ decoder_attentions=outputs.decoder_attentions,
1677
+ cross_attentions=outputs.cross_attentions,
1678
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1679
+ encoder_hidden_states=outputs.encoder_hidden_states,
1680
+ encoder_attentions=outputs.encoder_attentions,
1681
+ )
1682
+
1683
+ def serving_output(self, output):
1684
+ logits = tf.convert_to_tensor(output.logits)
1685
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
1686
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
1687
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
1688
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
1689
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
1690
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
1691
+
1692
+ return TFSeq2SeqSequenceClassifierOutput(
1693
+ logits=logits,
1694
+ past_key_values=pkv,
1695
+ decoder_hidden_states=dec_hs,
1696
+ decoder_attentions=dec_attns,
1697
+ cross_attentions=cross_attns,
1698
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
1699
+ encoder_hidden_states=enc_hs,
1700
+ encoder_attentions=enc_attns,
1701
+ )
1702
+
1703
+ def build(self, input_shape=None):
1704
+ if self.built:
1705
+ return
1706
+ self.built = True
1707
+ if getattr(self, "model", None) is not None:
1708
+ with tf.name_scope(self.model.name):
1709
+ self.model.build(None)
1710
+ if getattr(self, "classification_head", None) is not None:
1711
+ with tf.name_scope(self.classification_head.name):
1712
+ self.classification_head.build(None)
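# Illustrative sketch (not part of this file) of the <eos> pooling in
# TFBartForSequenceClassification.call above: the decoder hidden state at the final <eos> token of
# each sequence is taken as the sentence representation. Toy ids and shapes below are assumptions.
import tensorflow as tf

eos_token_id = 2
input_ids = tf.constant([[5, 6, 2], [7, 8, 2]])       # one <eos> per example, in the last position
hidden = tf.random.normal((2, 3, 4))                  # (batch_size, seq_len, d_model)

eos_mask = tf.equal(input_ids, eos_token_id)          # boolean (batch_size, seq_len)
sentence_representation = tf.reshape(
    tf.boolean_mask(hidden, eos_mask), (2, -1, 4)     # gather <eos> states, regroup per example
)[:, -1, :]                                           # keep the last <eos> state of each example
print(sentence_representation.shape)                  # (2, 4)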
llmeval-env/lib/python3.10/site-packages/transformers/models/bart/tokenization_bart.py ADDED
@@ -0,0 +1,390 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import json
17
+ import os
18
+ from functools import lru_cache
19
+ from typing import List, Optional, Tuple
20
+
21
+ import regex as re
22
+
23
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
31
+
32
+ # See all BART models at https://huggingface.co/models?filter=bart
33
+
34
+
35
+ @lru_cache()
36
+ def bytes_to_unicode():
37
+ """
38
+ Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control
39
+ characters the bpe code barfs on.
40
+
41
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
42
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
43
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
44
+ tables between utf-8 bytes and unicode strings.
45
+ """
46
+ bs = (
47
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
48
+ )
49
+ cs = bs[:]
50
+ n = 0
51
+ for b in range(2**8):
52
+ if b not in bs:
53
+ bs.append(b)
54
+ cs.append(2**8 + n)
55
+ n += 1
56
+ cs = [chr(n) for n in cs]
57
+ return dict(zip(bs, cs))
58
+
59
+
60
+ def get_pairs(word):
61
+ """
62
+ Return set of symbol pairs in a word.
63
+
64
+ Word is represented as tuple of symbols (symbols being variable-length strings).
65
+ """
66
+ pairs = set()
67
+ prev_char = word[0]
68
+ for char in word[1:]:
69
+ pairs.add((prev_char, char))
70
+ prev_char = char
71
+ return pairs
72
+
73
+
74
+ class BartTokenizer(PreTrainedTokenizer):
75
+ """
76
+ Constructs a BART tokenizer, which is smilar to the ROBERTa tokenizer, using byte-level Byte-Pair-Encoding.
77
+
78
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
79
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
80
+
81
+ ```python
82
+ >>> from transformers import BartTokenizer
83
+
84
+ >>> tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
85
+ >>> tokenizer("Hello world")["input_ids"]
86
+ [0, 31414, 232, 2]
87
+
88
+ >>> tokenizer(" Hello world")["input_ids"]
89
+ [0, 20920, 232, 2]
90
+ ```
91
+
92
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
93
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
94
+
95
+ <Tip>
96
+
97
+ When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
98
+
99
+ </Tip>
100
+
101
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
102
+ this superclass for more information regarding those methods.
103
+
104
+ Args:
105
+ vocab_file (`str`):
106
+ Path to the vocabulary file.
107
+ merges_file (`str`):
108
+ Path to the merges file.
109
+ errors (`str`, *optional*, defaults to `"replace"`):
110
+ Paradigm to follow when decoding bytes to UTF-8. See
111
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
112
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
113
+ The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
114
+
115
+ <Tip>
116
+
117
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
118
+ sequence. The token used is the `cls_token`.
119
+
120
+ </Tip>
121
+
122
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
123
+ The end of sequence token.
124
+
125
+ <Tip>
126
+
127
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
128
+ The token used is the `sep_token`.
129
+
130
+ </Tip>
131
+
132
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
133
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
134
+ sequence classification or for a text and a question for question answering. It is also used as the last
135
+ token of a sequence built with special tokens.
136
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
137
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
138
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
139
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
140
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
141
+ token instead.
142
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
143
+ The token used for padding, for example when batching sequences of different lengths.
144
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
145
+ The token used for masking values. This is the token used when training this model with masked language
146
+ modeling. This is the token which the model will try to predict.
147
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
148
+ Whether or not to add an initial space to the input. This allows to treat the leading word just as any
149
+ other word. (BART tokenizer detect beginning of words by the preceding space).
150
+ """
151
+
152
+ vocab_files_names = VOCAB_FILES_NAMES
153
+ model_input_names = ["input_ids", "attention_mask"]
154
+
155
+ def __init__(
156
+ self,
157
+ vocab_file,
158
+ merges_file,
159
+ errors="replace",
160
+ bos_token="<s>",
161
+ eos_token="</s>",
162
+ sep_token="</s>",
163
+ cls_token="<s>",
164
+ unk_token="<unk>",
165
+ pad_token="<pad>",
166
+ mask_token="<mask>",
167
+ add_prefix_space=False,
168
+ **kwargs,
169
+ ):
170
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
171
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
172
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
173
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
174
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
175
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
176
+
177
+ # Mask token behave like a normal word, i.e. include the space before it
178
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
179
+
180
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
181
+ self.encoder = json.load(vocab_handle)
182
+ self.decoder = {v: k for k, v in self.encoder.items()}
183
+ self.errors = errors # how to handle errors in decoding
184
+ self.byte_encoder = bytes_to_unicode()
185
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
186
+ with open(merges_file, encoding="utf-8") as merges_handle:
187
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
188
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
189
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
190
+ self.cache = {}
191
+ self.add_prefix_space = add_prefix_space
192
+
193
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
194
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
195
+
196
+ super().__init__(
197
+ errors=errors,
198
+ bos_token=bos_token,
199
+ eos_token=eos_token,
200
+ unk_token=unk_token,
201
+ sep_token=sep_token,
202
+ cls_token=cls_token,
203
+ pad_token=pad_token,
204
+ mask_token=mask_token,
205
+ add_prefix_space=add_prefix_space,
206
+ **kwargs,
207
+ )
208
+
209
+ @property
210
+ def vocab_size(self):
211
+ return len(self.encoder)
212
+
213
+ def get_vocab(self):
214
+ return dict(self.encoder, **self.added_tokens_encoder)
215
+
216
+ def bpe(self, token):
217
+ if token in self.cache:
218
+ return self.cache[token]
219
+ word = tuple(token)
220
+ pairs = get_pairs(word)
221
+
222
+ if not pairs:
223
+ return token
224
+
225
+ while True:
226
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
227
+ if bigram not in self.bpe_ranks:
228
+ break
229
+ first, second = bigram
230
+ new_word = []
231
+ i = 0
232
+ while i < len(word):
233
+ try:
234
+ j = word.index(first, i)
235
+ except ValueError:
236
+ new_word.extend(word[i:])
237
+ break
238
+ else:
239
+ new_word.extend(word[i:j])
240
+ i = j
241
+
242
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
243
+ new_word.append(first + second)
244
+ i += 2
245
+ else:
246
+ new_word.append(word[i])
247
+ i += 1
248
+ new_word = tuple(new_word)
249
+ word = new_word
250
+ if len(word) == 1:
251
+ break
252
+ else:
253
+ pairs = get_pairs(word)
254
+ word = " ".join(word)
255
+ self.cache[token] = word
256
+ return word
257
+
258
+ def _tokenize(self, text):
259
+ """Tokenize a string."""
260
+ bpe_tokens = []
261
+ for token in re.findall(self.pat, text):
262
+ token = "".join(
263
+ self.byte_encoder[b] for b in token.encode("utf-8")
264
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
265
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
266
+ return bpe_tokens
267
+
268
+ def _convert_token_to_id(self, token):
269
+ """Converts a token (str) in an id using the vocab."""
270
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
271
+
272
+ def _convert_id_to_token(self, index):
273
+ """Converts an index (integer) in a token (str) using the vocab."""
274
+ return self.decoder.get(index)
275
+
276
+ def convert_tokens_to_string(self, tokens):
277
+ """Converts a sequence of tokens (string) in a single string."""
278
+ text = "".join(tokens)
279
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
280
+ return text
281
+
282
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
283
+ if not os.path.isdir(save_directory):
284
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
285
+ return
286
+ vocab_file = os.path.join(
287
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
288
+ )
289
+ merge_file = os.path.join(
290
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
291
+ )
292
+
293
+ with open(vocab_file, "w", encoding="utf-8") as f:
294
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
295
+
296
+ index = 0
297
+ with open(merge_file, "w", encoding="utf-8") as writer:
298
+ writer.write("#version: 0.2\n")
299
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
300
+ if index != token_index:
301
+ logger.warning(
302
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
303
+ " Please check that the tokenizer is not corrupted!"
304
+ )
305
+ index = token_index
306
+ writer.write(" ".join(bpe_tokens) + "\n")
307
+ index += 1
308
+
309
+ return vocab_file, merge_file
310
+
311
+ def build_inputs_with_special_tokens(
312
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
313
+ ) -> List[int]:
314
+ """
315
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
316
+ adding special tokens. A BART sequence has the following format:
317
+
318
+ - single sequence: `<s> X </s>`
319
+ - pair of sequences: `<s> A </s></s> B </s>`
320
+
321
+ Args:
322
+ token_ids_0 (`List[int]`):
323
+ List of IDs to which the special tokens will be added.
324
+ token_ids_1 (`List[int]`, *optional*):
325
+ Optional second list of IDs for sequence pairs.
326
+
327
+ Returns:
328
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
329
+ """
330
+ if token_ids_1 is None:
331
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
332
+ cls = [self.cls_token_id]
333
+ sep = [self.sep_token_id]
334
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
335
+
336
+ def get_special_tokens_mask(
337
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
338
+ ) -> List[int]:
339
+ """
340
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
341
+ special tokens using the tokenizer `prepare_for_model` method.
342
+
343
+ Args:
344
+ token_ids_0 (`List[int]`):
345
+ List of IDs.
346
+ token_ids_1 (`List[int]`, *optional*):
347
+ Optional second list of IDs for sequence pairs.
348
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
349
+ Whether or not the token list is already formatted with special tokens for the model.
350
+
351
+ Returns:
352
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
353
+ """
354
+ if already_has_special_tokens:
355
+ return super().get_special_tokens_mask(
356
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
357
+ )
358
+
359
+ if token_ids_1 is None:
360
+ return [1] + ([0] * len(token_ids_0)) + [1]
361
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
362
+
363
+ def create_token_type_ids_from_sequences(
364
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
365
+ ) -> List[int]:
366
+ """
367
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. BART does not
368
+ make use of token type ids, therefore a list of zeros is returned.
369
+
370
+ Args:
371
+ token_ids_0 (`List[int]`):
372
+ List of IDs.
373
+ token_ids_1 (`List[int]`, *optional*):
374
+ Optional second list of IDs for sequence pairs.
375
+
376
+ Returns:
377
+ `List[int]`: List of zeros.
378
+ """
379
+ sep = [self.sep_token_id]
380
+ cls = [self.cls_token_id]
381
+
382
+ if token_ids_1 is None:
383
+ return len(cls + token_ids_0 + sep) * [0]
384
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
385
+
386
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
387
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
388
+ if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
389
+ text = " " + text
390
+ return (text, kwargs)
llmeval-env/lib/python3.10/site-packages/transformers/models/bart/tokenization_bart_fast.py ADDED
@@ -0,0 +1,276 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import json
17
+ from typing import List, Optional, Tuple
18
+
19
+ from tokenizers import pre_tokenizers, processors
20
+
21
+ from ...tokenization_utils_base import AddedToken, BatchEncoding
22
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
23
+ from ...utils import logging
24
+ from .tokenization_bart import BartTokenizer
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
31
+
32
+ # See all BART models at https://huggingface.co/models?filter=bart
33
+
34
+
35
+ class BartTokenizerFast(PreTrainedTokenizerFast):
36
+ r"""
37
+ Construct a "fast" BART tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2 tokenizer,
38
+ using byte-level Byte-Pair-Encoding.
39
+
40
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
41
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
42
+
43
+ ```python
44
+ >>> from transformers import BartTokenizerFast
45
+
46
+ >>> tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
47
+ >>> tokenizer("Hello world")["input_ids"]
48
+ [0, 31414, 232, 2]
49
+
50
+ >>> tokenizer(" Hello world")["input_ids"]
51
+ [0, 20920, 232, 2]
52
+ ```
53
+
54
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
55
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
56
+
57
+ <Tip>
58
+
59
+ When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
60
+
61
+ </Tip>
62
+
63
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
64
+ refer to this superclass for more information regarding those methods.
65
+
66
+ Args:
67
+ vocab_file (`str`):
68
+ Path to the vocabulary file.
69
+ merges_file (`str`):
70
+ Path to the merges file.
71
+ errors (`str`, *optional*, defaults to `"replace"`):
72
+ Paradigm to follow when decoding bytes to UTF-8. See
73
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
74
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
75
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
76
+
77
+ <Tip>
78
+
79
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
80
+ sequence. The token used is the `cls_token`.
81
+
82
+ </Tip>
83
+
84
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
85
+ The end of sequence token.
86
+
87
+ <Tip>
88
+
89
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
90
+ The token used is the `sep_token`.
91
+
92
+ </Tip>
93
+
94
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
95
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
96
+ sequence classification or for a text and a question for question answering. It is also used as the last
97
+ token of a sequence built with special tokens.
98
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
99
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
100
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
101
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
102
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
103
+ token instead.
104
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
105
+ The token used for padding, for example when batching sequences of different lengths.
106
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
107
+ The token used for masking values. This is the token used when training this model with masked language
108
+ modeling. This is the token which the model will try to predict.
109
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
110
+ Whether or not to add an initial space to the input. This allows the leading word to be treated just like
111
+ any other word. (The BART tokenizer detects the beginning of words by the preceding space.)
112
+ trim_offsets (`bool`, *optional*, defaults to `True`):
113
+ Whether the post processing step should trim offsets to avoid including whitespaces.
114
+ """
115
+
116
+ vocab_files_names = VOCAB_FILES_NAMES
117
+ model_input_names = ["input_ids", "attention_mask"]
118
+ slow_tokenizer_class = BartTokenizer
119
+
120
+ def __init__(
121
+ self,
122
+ vocab_file=None,
123
+ merges_file=None,
124
+ tokenizer_file=None,
125
+ errors="replace",
126
+ bos_token="<s>",
127
+ eos_token="</s>",
128
+ sep_token="</s>",
129
+ cls_token="<s>",
130
+ unk_token="<unk>",
131
+ pad_token="<pad>",
132
+ mask_token="<mask>",
133
+ add_prefix_space=False,
134
+ trim_offsets=True,
135
+ **kwargs,
136
+ ):
137
+ # we have to specify that this token is special, otherwise adding it will reset the normalized flag to `False` in `add_special_tokens`
138
+ mask_token = (
139
+ AddedToken(mask_token, lstrip=True, normalized=True, special=True)
140
+ if isinstance(mask_token, str)
141
+ else mask_token
142
+ )
143
+ super().__init__(
144
+ vocab_file,
145
+ merges_file,
146
+ tokenizer_file=tokenizer_file,
147
+ errors=errors,
148
+ bos_token=bos_token,
149
+ eos_token=eos_token,
150
+ sep_token=sep_token,
151
+ cls_token=cls_token,
152
+ unk_token=unk_token,
153
+ pad_token=pad_token,
154
+ mask_token=mask_token,
155
+ add_prefix_space=add_prefix_space,
156
+ trim_offsets=trim_offsets,
157
+ **kwargs,
158
+ )
159
+
160
+ pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
161
+ if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
162
+ pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
163
+ pre_tok_state["add_prefix_space"] = add_prefix_space
164
+ self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
165
+
166
+ self.add_prefix_space = add_prefix_space
167
+
168
+ # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
169
+ tokenizer_component = "post_processor"
170
+ tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
171
+ if tokenizer_component_instance:
172
+ state = json.loads(tokenizer_component_instance.__getstate__())
173
+
174
+ # The lists 'sep' and 'cls' must be cast to tuples for the `post_processor_class` object
175
+ if "sep" in state:
176
+ state["sep"] = tuple(state["sep"])
177
+ if "cls" in state:
178
+ state["cls"] = tuple(state["cls"])
179
+
180
+ changes_to_apply = False
181
+
182
+ if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
183
+ state["add_prefix_space"] = add_prefix_space
184
+ changes_to_apply = True
185
+
186
+ if state.get("trim_offsets", trim_offsets) != trim_offsets:
187
+ state["trim_offsets"] = trim_offsets
188
+ changes_to_apply = True
189
+
190
+ if changes_to_apply:
191
+ component_class = getattr(processors, state.pop("type"))
192
+ new_value = component_class(**state)
193
+ setattr(self.backend_tokenizer, tokenizer_component, new_value)
194
+
195
+ @property
196
+ def mask_token(self) -> str:
197
+ """
198
+ `str`: Mask token, to use when training a model with masked-language modeling. Logs an error if used while not
199
+ having been set.
200
+
201
+ BART tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily
202
+ comprise the space before the *<mask>*.
203
+ """
204
+ if self._mask_token is None:
205
+ if self.verbose:
206
+ logger.error("Using mask_token, but it is not set yet.")
207
+ return None
208
+ return str(self._mask_token)
209
+
210
+ @mask_token.setter
211
+ def mask_token(self, value):
212
+ """
213
+ Overriding the default behavior of the mask token to have it eat the space before it.
214
+
215
+ This is needed to preserve backward compatibility with all the previously used models based on Bart.
216
+ """
217
+ # The mask token behaves like a normal word, i.e. it includes the space before it
218
+ # So we set lstrip to True
219
+ value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
220
+ self._mask_token = value
221
+
222
+ def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
223
+ is_split_into_words = kwargs.get("is_split_into_words", False)
224
+
225
+ if is_split_into_words and not self.add_prefix_space:
226
+ raise ValueError(
227
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
228
+ "to use it with pretokenized inputs."
229
+ )
230
+
231
+ return super()._batch_encode_plus(*args, **kwargs)
232
+
233
+ def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
234
+ is_split_into_words = kwargs.get("is_split_into_words", False)
235
+
236
+ if is_split_into_words and not self.add_prefix_space:
237
+ raise ValueError(
238
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
239
+ "to use it with pretokenized inputs."
240
+ )
241
+
242
+ return super()._encode_plus(*args, **kwargs)
243
+
244
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
245
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
246
+ return tuple(files)
247
+
248
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
249
+ output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
250
+ if token_ids_1 is None:
251
+ return output
252
+
253
+ return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
254
+
255
+ def create_token_type_ids_from_sequences(
256
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
257
+ ) -> List[int]:
258
+ """
259
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. BART does not
260
+ make use of token type ids, therefore a list of zeros is returned.
261
+
262
+ Args:
263
+ token_ids_0 (`List[int]`):
264
+ List of IDs.
265
+ token_ids_1 (`List[int]`, *optional*):
266
+ Optional second list of IDs for sequence pairs.
267
+
268
+ Returns:
269
+ `List[int]`: List of zeros.
270
+ """
271
+ sep = [self.sep_token_id]
272
+ cls = [self.cls_token_id]
273
+
274
+ if token_ids_1 is None:
275
+ return len(cls + token_ids_0 + sep) * [0]
276
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
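A brief, hedged usage sketch of the fast BART tokenizer defined above: it illustrates the `<s> A </s>` / `<s> A </s></s> B </s>` layout produced by `build_inputs_with_special_tokens` and the `add_prefix_space` requirement enforced in `_encode_plus`. The checkpoint name `facebook/bart-base` is illustrative and assumes the checkpoint (and the fast `tokenizers` backend) is available.

```python
# Sketch only: assumes `transformers` with the fast `tokenizers` backend is installed
# and that the illustrative "facebook/bart-base" checkpoint can be downloaded.
from transformers import BartTokenizerFast

tok = BartTokenizerFast.from_pretrained("facebook/bart-base")

# Single sequence -> <s> A </s>
single = tok("Hello world")["input_ids"]
assert single[0] == tok.bos_token_id and single[-1] == tok.eos_token_id

# Sequence pair -> <s> A </s></s> B </s>, mirroring build_inputs_with_special_tokens
pair = tok("Hello world", "How are you?")["input_ids"]

# Pre-tokenized input requires add_prefix_space=True, otherwise _encode_plus raises ValueError
tok_ws = BartTokenizerFast.from_pretrained("facebook/bart-base", add_prefix_space=True)
pre = tok_ws(["Hello", "world"], is_split_into_words=True)["input_ids"]
```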
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__init__.py ADDED
@@ -0,0 +1,197 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_tensorflow_text_available,
22
+ is_tf_available,
23
+ is_tokenizers_available,
24
+ is_torch_available,
25
+ )
26
+
27
+
28
+ _import_structure = {
29
+ "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
30
+ "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
31
+ }
32
+
33
+ try:
34
+ if not is_tokenizers_available():
35
+ raise OptionalDependencyNotAvailable()
36
+ except OptionalDependencyNotAvailable:
37
+ pass
38
+ else:
39
+ _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
40
+
41
+ try:
42
+ if not is_torch_available():
43
+ raise OptionalDependencyNotAvailable()
44
+ except OptionalDependencyNotAvailable:
45
+ pass
46
+ else:
47
+ _import_structure["modeling_bert"] = [
48
+ "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
49
+ "BertForMaskedLM",
50
+ "BertForMultipleChoice",
51
+ "BertForNextSentencePrediction",
52
+ "BertForPreTraining",
53
+ "BertForQuestionAnswering",
54
+ "BertForSequenceClassification",
55
+ "BertForTokenClassification",
56
+ "BertLayer",
57
+ "BertLMHeadModel",
58
+ "BertModel",
59
+ "BertPreTrainedModel",
60
+ "load_tf_weights_in_bert",
61
+ ]
62
+
63
+ try:
64
+ if not is_tf_available():
65
+ raise OptionalDependencyNotAvailable()
66
+ except OptionalDependencyNotAvailable:
67
+ pass
68
+ else:
69
+ _import_structure["modeling_tf_bert"] = [
70
+ "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
71
+ "TFBertEmbeddings",
72
+ "TFBertForMaskedLM",
73
+ "TFBertForMultipleChoice",
74
+ "TFBertForNextSentencePrediction",
75
+ "TFBertForPreTraining",
76
+ "TFBertForQuestionAnswering",
77
+ "TFBertForSequenceClassification",
78
+ "TFBertForTokenClassification",
79
+ "TFBertLMHeadModel",
80
+ "TFBertMainLayer",
81
+ "TFBertModel",
82
+ "TFBertPreTrainedModel",
83
+ ]
84
+ try:
85
+ if not is_tensorflow_text_available():
86
+ raise OptionalDependencyNotAvailable()
87
+ except OptionalDependencyNotAvailable:
88
+ pass
89
+ else:
90
+ _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
91
+
92
+ try:
93
+ if not is_flax_available():
94
+ raise OptionalDependencyNotAvailable()
95
+ except OptionalDependencyNotAvailable:
96
+ pass
97
+ else:
98
+ _import_structure["modeling_flax_bert"] = [
99
+ "FlaxBertForCausalLM",
100
+ "FlaxBertForMaskedLM",
101
+ "FlaxBertForMultipleChoice",
102
+ "FlaxBertForNextSentencePrediction",
103
+ "FlaxBertForPreTraining",
104
+ "FlaxBertForQuestionAnswering",
105
+ "FlaxBertForSequenceClassification",
106
+ "FlaxBertForTokenClassification",
107
+ "FlaxBertModel",
108
+ "FlaxBertPreTrainedModel",
109
+ ]
110
+
111
+ if TYPE_CHECKING:
112
+ from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
113
+ from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
114
+
115
+ try:
116
+ if not is_tokenizers_available():
117
+ raise OptionalDependencyNotAvailable()
118
+ except OptionalDependencyNotAvailable:
119
+ pass
120
+ else:
121
+ from .tokenization_bert_fast import BertTokenizerFast
122
+
123
+ try:
124
+ if not is_torch_available():
125
+ raise OptionalDependencyNotAvailable()
126
+ except OptionalDependencyNotAvailable:
127
+ pass
128
+ else:
129
+ from .modeling_bert import (
130
+ BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
131
+ BertForMaskedLM,
132
+ BertForMultipleChoice,
133
+ BertForNextSentencePrediction,
134
+ BertForPreTraining,
135
+ BertForQuestionAnswering,
136
+ BertForSequenceClassification,
137
+ BertForTokenClassification,
138
+ BertLayer,
139
+ BertLMHeadModel,
140
+ BertModel,
141
+ BertPreTrainedModel,
142
+ load_tf_weights_in_bert,
143
+ )
144
+
145
+ try:
146
+ if not is_tf_available():
147
+ raise OptionalDependencyNotAvailable()
148
+ except OptionalDependencyNotAvailable:
149
+ pass
150
+ else:
151
+ from .modeling_tf_bert import (
152
+ TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
153
+ TFBertEmbeddings,
154
+ TFBertForMaskedLM,
155
+ TFBertForMultipleChoice,
156
+ TFBertForNextSentencePrediction,
157
+ TFBertForPreTraining,
158
+ TFBertForQuestionAnswering,
159
+ TFBertForSequenceClassification,
160
+ TFBertForTokenClassification,
161
+ TFBertLMHeadModel,
162
+ TFBertMainLayer,
163
+ TFBertModel,
164
+ TFBertPreTrainedModel,
165
+ )
166
+
167
+ try:
168
+ if not is_tensorflow_text_available():
169
+ raise OptionalDependencyNotAvailable()
170
+ except OptionalDependencyNotAvailable:
171
+ pass
172
+ else:
173
+ from .tokenization_bert_tf import TFBertTokenizer
174
+
175
+ try:
176
+ if not is_flax_available():
177
+ raise OptionalDependencyNotAvailable()
178
+ except OptionalDependencyNotAvailable:
179
+ pass
180
+ else:
181
+ from .modeling_flax_bert import (
182
+ FlaxBertForCausalLM,
183
+ FlaxBertForMaskedLM,
184
+ FlaxBertForMultipleChoice,
185
+ FlaxBertForNextSentencePrediction,
186
+ FlaxBertForPreTraining,
187
+ FlaxBertForQuestionAnswering,
188
+ FlaxBertForSequenceClassification,
189
+ FlaxBertForTokenClassification,
190
+ FlaxBertModel,
191
+ FlaxBertPreTrainedModel,
192
+ )
193
+
194
+ else:
195
+ import sys
196
+
197
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
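The `_LazyModule` wiring above keeps `import transformers.models.bert` cheap: submodules listed in `_import_structure` are only imported when one of their attributes is first accessed, and framework-specific entries are registered only if the optional dependency is installed. A minimal sketch of what that looks like from user code (assumes only that `transformers` itself is installed):

```python
# Sketch of the lazy-import behaviour configured above.
import importlib

bert = importlib.import_module("transformers.models.bert")

# Always registered: the configuration and the slow tokenizer.
print(bert.BertConfig)     # resolved from configuration_bert on first access
print(bert.BertTokenizer)  # resolved from tokenization_bert on first access

# Framework-specific classes are only registered when torch / TF / Flax are importable,
# so a missing backend surfaces as a failed attribute lookup rather than an import error.
if hasattr(bert, "BertModel"):
    print(bert.BertModel)  # the torch branch was registered in _import_structure
```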
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/configuration_bert.py ADDED
@@ -0,0 +1,153 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ BERT model configuration"""
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...onnx import OnnxConfig
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ from ..deprecated._archive_maps import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
29
+
30
+
31
+ class BertConfig(PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of a [`BertModel`] or a [`TFBertModel`]. It is used to
34
+ instantiate a BERT model according to the specified arguments, defining the model architecture. Instantiating a
35
+ configuration with the defaults will yield a similar configuration to that of the BERT
36
+ [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased) architecture.
37
+
38
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
39
+ documentation from [`PretrainedConfig`] for more information.
40
+
41
+
42
+ Args:
43
+ vocab_size (`int`, *optional*, defaults to 30522):
44
+ Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
45
+ `inputs_ids` passed when calling [`BertModel`] or [`TFBertModel`].
46
+ hidden_size (`int`, *optional*, defaults to 768):
47
+ Dimensionality of the encoder layers and the pooler layer.
48
+ num_hidden_layers (`int`, *optional*, defaults to 12):
49
+ Number of hidden layers in the Transformer encoder.
50
+ num_attention_heads (`int`, *optional*, defaults to 12):
51
+ Number of attention heads for each attention layer in the Transformer encoder.
52
+ intermediate_size (`int`, *optional*, defaults to 3072):
53
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
54
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
55
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
56
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
57
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
58
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
59
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
60
+ The dropout ratio for the attention probabilities.
61
+ max_position_embeddings (`int`, *optional*, defaults to 512):
62
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
63
+ just in case (e.g., 512 or 1024 or 2048).
64
+ type_vocab_size (`int`, *optional*, defaults to 2):
65
+ The vocabulary size of the `token_type_ids` passed when calling [`BertModel`] or [`TFBertModel`].
66
+ initializer_range (`float`, *optional*, defaults to 0.02):
67
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
68
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
69
+ The epsilon used by the layer normalization layers.
70
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
71
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
72
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
73
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
74
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
75
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
76
+ is_decoder (`bool`, *optional*, defaults to `False`):
77
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
78
+ use_cache (`bool`, *optional*, defaults to `True`):
79
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
80
+ relevant if `config.is_decoder=True`.
81
+ classifier_dropout (`float`, *optional*):
82
+ The dropout ratio for the classification head.
83
+
84
+ Examples:
85
+
86
+ ```python
87
+ >>> from transformers import BertConfig, BertModel
88
+
89
+ >>> # Initializing a BERT google-bert/bert-base-uncased style configuration
90
+ >>> configuration = BertConfig()
91
+
92
+ >>> # Initializing a model (with random weights) from the google-bert/bert-base-uncased style configuration
93
+ >>> model = BertModel(configuration)
94
+
95
+ >>> # Accessing the model configuration
96
+ >>> configuration = model.config
97
+ ```"""
98
+
99
+ model_type = "bert"
100
+
101
+ def __init__(
102
+ self,
103
+ vocab_size=30522,
104
+ hidden_size=768,
105
+ num_hidden_layers=12,
106
+ num_attention_heads=12,
107
+ intermediate_size=3072,
108
+ hidden_act="gelu",
109
+ hidden_dropout_prob=0.1,
110
+ attention_probs_dropout_prob=0.1,
111
+ max_position_embeddings=512,
112
+ type_vocab_size=2,
113
+ initializer_range=0.02,
114
+ layer_norm_eps=1e-12,
115
+ pad_token_id=0,
116
+ position_embedding_type="absolute",
117
+ use_cache=True,
118
+ classifier_dropout=None,
119
+ **kwargs,
120
+ ):
121
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
122
+
123
+ self.vocab_size = vocab_size
124
+ self.hidden_size = hidden_size
125
+ self.num_hidden_layers = num_hidden_layers
126
+ self.num_attention_heads = num_attention_heads
127
+ self.hidden_act = hidden_act
128
+ self.intermediate_size = intermediate_size
129
+ self.hidden_dropout_prob = hidden_dropout_prob
130
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
131
+ self.max_position_embeddings = max_position_embeddings
132
+ self.type_vocab_size = type_vocab_size
133
+ self.initializer_range = initializer_range
134
+ self.layer_norm_eps = layer_norm_eps
135
+ self.position_embedding_type = position_embedding_type
136
+ self.use_cache = use_cache
137
+ self.classifier_dropout = classifier_dropout
138
+
139
+
140
+ class BertOnnxConfig(OnnxConfig):
141
+ @property
142
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
143
+ if self.task == "multiple-choice":
144
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
145
+ else:
146
+ dynamic_axis = {0: "batch", 1: "sequence"}
147
+ return OrderedDict(
148
+ [
149
+ ("input_ids", dynamic_axis),
150
+ ("attention_mask", dynamic_axis),
151
+ ("token_type_ids", dynamic_axis),
152
+ ]
153
+ )
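A short, hedged sketch of the two classes above: `BertConfig` can be instantiated with overridden hyperparameters, and `BertOnnxConfig.inputs` reports the dynamic axes used for ONNX export. No checkpoint is needed; the reduced sizes below are arbitrary illustrative values.

```python
# Sketch only: assumes `transformers` is installed.
from transformers import BertConfig
from transformers.models.bert.configuration_bert import BertOnnxConfig

# A smaller-than-default architecture, overriding some of the arguments documented above.
config = BertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4, intermediate_size=1024)

# Dynamic axes exposed for ONNX export (default task, so no "choice" axis).
onnx_config = BertOnnxConfig(config)
print(dict(onnx_config.inputs))
# {'input_ids': {0: 'batch', 1: 'sequence'},
#  'attention_mask': {0: 'batch', 1: 'sequence'},
#  'token_type_ids': {0: 'batch', 1: 'sequence'}}
```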
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,245 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ This script can be used to convert a head-less TF2.x Bert model to PyTorch, as published on the official (now
17
+ deprecated) GitHub: https://github.com/tensorflow/models/tree/v2.3.0/official/nlp/bert
18
+
19
+ TF2.x uses different variable names from the original BERT (TF 1.4) implementation. The script re-maps the TF2.x Bert
20
+ weight names to the original names, so the model can be imported with Hugging Face Transformers.
21
+
22
+ You may adapt this script to include classification/MLM/NSP/etc. heads.
23
+
24
+ Note: This script only works with an older version of the TensorFlow models repository (<= v2.3.0).
25
+ Models trained with newer versions are not compatible with this script.
26
+ """
27
+ import argparse
28
+ import os
29
+ import re
30
+
31
+ import tensorflow as tf
32
+ import torch
33
+
34
+ from transformers import BertConfig, BertModel
35
+ from transformers.utils import logging
36
+
37
+
38
+ logging.set_verbosity_info()
39
+ logger = logging.get_logger(__name__)
40
+
41
+
42
+ def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
43
+ tf_path = os.path.abspath(tf_checkpoint_path)
44
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
45
+ # Load weights from TF model
46
+ init_vars = tf.train.list_variables(tf_path)
47
+ names = []
48
+ arrays = []
49
+ layer_depth = []
50
+ for full_name, shape in init_vars:
51
+ # logger.info(f"Loading TF weight {name} with shape {shape}")
52
+ name = full_name.split("/")
53
+ if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
54
+ logger.info(f"Skipping non-model layer {full_name}")
55
+ continue
56
+ if "optimizer" in full_name:
57
+ logger.info(f"Skipping optimization layer {full_name}")
58
+ continue
59
+ if name[0] == "model":
60
+ # ignore initial 'model'
61
+ name = name[1:]
62
+ # figure out how many levels deep the name is
63
+ depth = 0
64
+ for _name in name:
65
+ if _name.startswith("layer_with_weights"):
66
+ depth += 1
67
+ else:
68
+ break
69
+ layer_depth.append(depth)
70
+ # read data
71
+ array = tf.train.load_variable(tf_path, full_name)
72
+ names.append("/".join(name))
73
+ arrays.append(array)
74
+ logger.info(f"Read a total of {len(arrays):,} layers")
75
+
76
+ # Sanity check
77
+ if len(set(layer_depth)) != 1:
78
+ raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
79
+ layer_depth = list(set(layer_depth))[0]
80
+ if layer_depth != 1:
81
+ raise ValueError(
82
+ "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
83
+ " heads."
84
+ )
85
+
86
+ # convert layers
87
+ logger.info("Converting weights...")
88
+ for full_name, array in zip(names, arrays):
89
+ name = full_name.split("/")
90
+ pointer = model
91
+ trace = []
92
+ for i, m_name in enumerate(name):
93
+ if m_name == ".ATTRIBUTES":
94
+ # variable names end with .ATTRIBUTES/VARIABLE_VALUE
95
+ break
96
+ if m_name.startswith("layer_with_weights"):
97
+ layer_num = int(m_name.split("-")[-1])
98
+ if layer_num <= 2:
99
+ # embedding layers
100
+ # layer_num 0: word_embeddings
101
+ # layer_num 1: position_embeddings
102
+ # layer_num 2: token_type_embeddings
103
+ continue
104
+ elif layer_num == 3:
105
+ # embedding LayerNorm
106
+ trace.extend(["embeddings", "LayerNorm"])
107
+ pointer = getattr(pointer, "embeddings")
108
+ pointer = getattr(pointer, "LayerNorm")
109
+ elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
110
+ # encoder layers
111
+ trace.extend(["encoder", "layer", str(layer_num - 4)])
112
+ pointer = getattr(pointer, "encoder")
113
+ pointer = getattr(pointer, "layer")
114
+ pointer = pointer[layer_num - 4]
115
+ elif layer_num == config.num_hidden_layers + 4:
116
+ # pooler layer
117
+ trace.extend(["pooler", "dense"])
118
+ pointer = getattr(pointer, "pooler")
119
+ pointer = getattr(pointer, "dense")
120
+ elif m_name == "embeddings":
121
+ trace.append("embeddings")
122
+ pointer = getattr(pointer, "embeddings")
123
+ if layer_num == 0:
124
+ trace.append("word_embeddings")
125
+ pointer = getattr(pointer, "word_embeddings")
126
+ elif layer_num == 1:
127
+ trace.append("position_embeddings")
128
+ pointer = getattr(pointer, "position_embeddings")
129
+ elif layer_num == 2:
130
+ trace.append("token_type_embeddings")
131
+ pointer = getattr(pointer, "token_type_embeddings")
132
+ else:
133
+ raise ValueError(f"Unknown embedding layer with name {full_name}")
134
+ trace.append("weight")
135
+ pointer = getattr(pointer, "weight")
136
+ elif m_name == "_attention_layer":
137
+ # self-attention layer
138
+ trace.extend(["attention", "self"])
139
+ pointer = getattr(pointer, "attention")
140
+ pointer = getattr(pointer, "self")
141
+ elif m_name == "_attention_layer_norm":
142
+ # output attention norm
143
+ trace.extend(["attention", "output", "LayerNorm"])
144
+ pointer = getattr(pointer, "attention")
145
+ pointer = getattr(pointer, "output")
146
+ pointer = getattr(pointer, "LayerNorm")
147
+ elif m_name == "_attention_output_dense":
148
+ # output attention dense
149
+ trace.extend(["attention", "output", "dense"])
150
+ pointer = getattr(pointer, "attention")
151
+ pointer = getattr(pointer, "output")
152
+ pointer = getattr(pointer, "dense")
153
+ elif m_name == "_output_dense":
154
+ # output dense
155
+ trace.extend(["output", "dense"])
156
+ pointer = getattr(pointer, "output")
157
+ pointer = getattr(pointer, "dense")
158
+ elif m_name == "_output_layer_norm":
159
+ # output dense
160
+ trace.extend(["output", "LayerNorm"])
161
+ pointer = getattr(pointer, "output")
162
+ pointer = getattr(pointer, "LayerNorm")
163
+ elif m_name == "_key_dense":
164
+ # attention key
165
+ trace.append("key")
166
+ pointer = getattr(pointer, "key")
167
+ elif m_name == "_query_dense":
168
+ # attention query
169
+ trace.append("query")
170
+ pointer = getattr(pointer, "query")
171
+ elif m_name == "_value_dense":
172
+ # attention value
173
+ trace.append("value")
174
+ pointer = getattr(pointer, "value")
175
+ elif m_name == "_intermediate_dense":
176
+ # attention intermediate dense
177
+ trace.extend(["intermediate", "dense"])
178
+ pointer = getattr(pointer, "intermediate")
179
+ pointer = getattr(pointer, "dense")
180
+ elif m_name == "_output_layer_norm":
181
+ # output layer norm
182
+ trace.append("output")
183
+ pointer = getattr(pointer, "output")
184
+ # weights & biases
185
+ elif m_name in ["bias", "beta"]:
186
+ trace.append("bias")
187
+ pointer = getattr(pointer, "bias")
188
+ elif m_name in ["kernel", "gamma"]:
189
+ trace.append("weight")
190
+ pointer = getattr(pointer, "weight")
191
+ else:
192
+ logger.warning(f"Ignored {m_name}")
193
+ # for certain layers reshape is necessary
194
+ trace = ".".join(trace)
195
+ if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
196
+ r"(\S+)\.attention\.output\.dense\.weight", trace
197
+ ):
198
+ array = array.reshape(pointer.data.shape)
199
+ if "kernel" in full_name:
200
+ array = array.transpose()
201
+ if pointer.shape == array.shape:
202
+ pointer.data = torch.from_numpy(array)
203
+ else:
204
+ raise ValueError(
205
+ f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
206
+ f" {array.shape}"
207
+ )
208
+ logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
209
+ return model
210
+
211
+
212
+ def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
213
+ # Instantiate model
214
+ logger.info(f"Loading model based on config from {config_path}...")
215
+ config = BertConfig.from_json_file(config_path)
216
+ model = BertModel(config)
217
+
218
+ # Load weights from checkpoint
219
+ logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
220
+ load_tf2_weights_in_bert(model, tf_checkpoint_path, config)
221
+
222
+ # Save pytorch-model
223
+ logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
224
+ torch.save(model.state_dict(), pytorch_dump_path)
225
+
226
+
227
+ if __name__ == "__main__":
228
+ parser = argparse.ArgumentParser()
229
+ parser.add_argument(
230
+ "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
231
+ )
232
+ parser.add_argument(
233
+ "--bert_config_file",
234
+ type=str,
235
+ required=True,
236
+ help="The config json file corresponding to the BERT model. This specifies the model architecture.",
237
+ )
238
+ parser.add_argument(
239
+ "--pytorch_dump_path",
240
+ type=str,
241
+ required=True,
242
+ help="Path to the output PyTorch model (must include filename).",
243
+ )
244
+ args = parser.parse_args()
245
+ convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
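The script above can also be driven from Python instead of the CLI. A hedged sketch, with placeholder paths; importing the module requires both TensorFlow and PyTorch, and the checkpoint must come from the older (<= v2.3.0) TensorFlow models repository as noted in the docstring.

```python
# Sketch only: paths are placeholders; TensorFlow and PyTorch must both be installed.
from transformers.models.bert.convert_bert_original_tf2_checkpoint_to_pytorch import (
    convert_tf2_checkpoint_to_pytorch,
)

convert_tf2_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/tf2_checkpoint/bert_model.ckpt",  # placeholder
    config_path="/path/to/bert_config.json",                       # placeholder
    pytorch_dump_path="/path/to/pytorch_model.bin",                # placeholder
)
```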
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,63 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert BERT checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ import torch
21
+
22
+ from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
23
+ from transformers.utils import logging
24
+
25
+
26
+ logging.set_verbosity_info()
27
+
28
+
29
+ def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
30
+ # Initialise PyTorch model
31
+ config = BertConfig.from_json_file(bert_config_file)
32
+ print(f"Building PyTorch model from configuration: {config}")
33
+ model = BertForPreTraining(config)
34
+
35
+ # Load weights from tf checkpoint
36
+ load_tf_weights_in_bert(model, config, tf_checkpoint_path)
37
+
38
+ # Save pytorch-model
39
+ print(f"Save PyTorch model to {pytorch_dump_path}")
40
+ torch.save(model.state_dict(), pytorch_dump_path)
41
+
42
+
43
+ if __name__ == "__main__":
44
+ parser = argparse.ArgumentParser()
45
+ # Required parameters
46
+ parser.add_argument(
47
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
48
+ )
49
+ parser.add_argument(
50
+ "--bert_config_file",
51
+ default=None,
52
+ type=str,
53
+ required=True,
54
+ help=(
55
+ "The config json file corresponding to the pre-trained BERT model. \n"
56
+ "This specifies the model architecture."
57
+ ),
58
+ )
59
+ parser.add_argument(
60
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
61
+ )
62
+ args = parser.parse_args()
63
+ convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
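Because the script above saves a raw `state_dict` with `torch.save` rather than a `save_pretrained` directory, reloading the dump means rebuilding the same architecture first. A hedged sketch with placeholder paths:

```python
# Sketch only: paths are placeholders; assumes torch and transformers are installed.
import torch
from transformers import BertConfig, BertForPreTraining

config = BertConfig.from_json_file("/path/to/bert_config.json")            # placeholder
model = BertForPreTraining(config)
state_dict = torch.load("/path/to/pytorch_model.bin", map_location="cpu")  # placeholder
model.load_state_dict(state_dict)
model.eval()
```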
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/modeling_bert.py ADDED
@@ -0,0 +1,1867 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """PyTorch BERT model."""
17
+
18
+ import math
19
+ import os
20
+ import warnings
21
+ from dataclasses import dataclass
22
+ from typing import List, Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.utils.checkpoint
26
+ from torch import nn
27
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
28
+
29
+ from ...activations import ACT2FN
30
+ from ...modeling_outputs import (
31
+ BaseModelOutputWithPastAndCrossAttentions,
32
+ BaseModelOutputWithPoolingAndCrossAttentions,
33
+ CausalLMOutputWithCrossAttentions,
34
+ MaskedLMOutput,
35
+ MultipleChoiceModelOutput,
36
+ NextSentencePredictorOutput,
37
+ QuestionAnsweringModelOutput,
38
+ SequenceClassifierOutput,
39
+ TokenClassifierOutput,
40
+ )
41
+ from ...modeling_utils import PreTrainedModel
42
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
43
+ from ...utils import (
44
+ ModelOutput,
45
+ add_code_sample_docstrings,
46
+ add_start_docstrings,
47
+ add_start_docstrings_to_model_forward,
48
+ logging,
49
+ replace_return_docstrings,
50
+ )
51
+ from .configuration_bert import BertConfig
52
+
53
+
54
+ logger = logging.get_logger(__name__)
55
+
56
+ _CHECKPOINT_FOR_DOC = "google-bert/bert-base-uncased"
57
+ _CONFIG_FOR_DOC = "BertConfig"
58
+
59
+ # TokenClassification docstring
60
+ _CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "dbmdz/bert-large-cased-finetuned-conll03-english"
61
+ _TOKEN_CLASS_EXPECTED_OUTPUT = (
62
+ "['O', 'I-ORG', 'I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC'] "
63
+ )
64
+ _TOKEN_CLASS_EXPECTED_LOSS = 0.01
65
+
66
+ # QuestionAnswering docstring
67
+ _CHECKPOINT_FOR_QA = "deepset/bert-base-cased-squad2"
68
+ _QA_EXPECTED_OUTPUT = "'a nice puppet'"
69
+ _QA_EXPECTED_LOSS = 7.41
70
+ _QA_TARGET_START_INDEX = 14
71
+ _QA_TARGET_END_INDEX = 15
72
+
73
+ # SequenceClassification docstring
74
+ _CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "textattack/bert-base-uncased-yelp-polarity"
75
+ _SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_1'"
76
+ _SEQ_CLASS_EXPECTED_LOSS = 0.01
77
+
78
+
79
+ from ..deprecated._archive_maps import BERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
80
+
81
+
82
+ def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
83
+ """Load tf checkpoints in a pytorch model."""
84
+ try:
85
+ import re
86
+
87
+ import numpy as np
88
+ import tensorflow as tf
89
+ except ImportError:
90
+ logger.error(
91
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
92
+ "https://www.tensorflow.org/install/ for installation instructions."
93
+ )
94
+ raise
95
+ tf_path = os.path.abspath(tf_checkpoint_path)
96
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
97
+ # Load weights from TF model
98
+ init_vars = tf.train.list_variables(tf_path)
99
+ names = []
100
+ arrays = []
101
+ for name, shape in init_vars:
102
+ logger.info(f"Loading TF weight {name} with shape {shape}")
103
+ array = tf.train.load_variable(tf_path, name)
104
+ names.append(name)
105
+ arrays.append(array)
106
+
107
+ for name, array in zip(names, arrays):
108
+ name = name.split("/")
109
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
110
+ # which are not required for using the pretrained model
111
+ if any(
112
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
113
+ for n in name
114
+ ):
115
+ logger.info(f"Skipping {'/'.join(name)}")
116
+ continue
117
+ pointer = model
118
+ for m_name in name:
119
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
120
+ scope_names = re.split(r"_(\d+)", m_name)
121
+ else:
122
+ scope_names = [m_name]
123
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
124
+ pointer = getattr(pointer, "weight")
125
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
126
+ pointer = getattr(pointer, "bias")
127
+ elif scope_names[0] == "output_weights":
128
+ pointer = getattr(pointer, "weight")
129
+ elif scope_names[0] == "squad":
130
+ pointer = getattr(pointer, "classifier")
131
+ else:
132
+ try:
133
+ pointer = getattr(pointer, scope_names[0])
134
+ except AttributeError:
135
+ logger.info(f"Skipping {'/'.join(name)}")
136
+ continue
137
+ if len(scope_names) >= 2:
138
+ num = int(scope_names[1])
139
+ pointer = pointer[num]
140
+ if m_name[-11:] == "_embeddings":
141
+ pointer = getattr(pointer, "weight")
142
+ elif m_name == "kernel":
143
+ array = np.transpose(array)
144
+ try:
145
+ if pointer.shape != array.shape:
146
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
147
+ except ValueError as e:
148
+ e.args += (pointer.shape, array.shape)
149
+ raise
150
+ logger.info(f"Initialize PyTorch weight {name}")
151
+ pointer.data = torch.from_numpy(array)
152
+ return model
153
+
154
+
155
+ class BertEmbeddings(nn.Module):
156
+ """Construct the embeddings from word, position and token_type embeddings."""
157
+
158
+ def __init__(self, config):
159
+ super().__init__()
160
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
161
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
162
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
163
+
164
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
165
+ # any TensorFlow checkpoint file
166
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
167
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
168
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
169
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
170
+ self.register_buffer(
171
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
172
+ )
173
+ self.register_buffer(
174
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
175
+ )
176
+
177
+ def forward(
178
+ self,
179
+ input_ids: Optional[torch.LongTensor] = None,
180
+ token_type_ids: Optional[torch.LongTensor] = None,
181
+ position_ids: Optional[torch.LongTensor] = None,
182
+ inputs_embeds: Optional[torch.FloatTensor] = None,
183
+ past_key_values_length: int = 0,
184
+ ) -> torch.Tensor:
185
+ if input_ids is not None:
186
+ input_shape = input_ids.size()
187
+ else:
188
+ input_shape = inputs_embeds.size()[:-1]
189
+
190
+ seq_length = input_shape[1]
191
+
192
+ if position_ids is None:
193
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
194
+
195
+ # Set token_type_ids to the registered buffer from the constructor, where it is all zeros; this is the usual case
196
+ # when it is auto-generated. The registered buffer helps users trace the model without passing token_type_ids
197
+ # and solves issue #5664.
198
+ if token_type_ids is None:
199
+ if hasattr(self, "token_type_ids"):
200
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
201
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
202
+ token_type_ids = buffered_token_type_ids_expanded
203
+ else:
204
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
205
+
206
+ if inputs_embeds is None:
207
+ inputs_embeds = self.word_embeddings(input_ids)
208
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
209
+
210
+ embeddings = inputs_embeds + token_type_embeddings
211
+ if self.position_embedding_type == "absolute":
212
+ position_embeddings = self.position_embeddings(position_ids)
213
+ embeddings += position_embeddings
214
+ embeddings = self.LayerNorm(embeddings)
215
+ embeddings = self.dropout(embeddings)
216
+ return embeddings
217
+
218
+
219
+ class BertSelfAttention(nn.Module):
220
+ def __init__(self, config, position_embedding_type=None):
221
+ super().__init__()
222
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
223
+ raise ValueError(
224
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
225
+ f"heads ({config.num_attention_heads})"
226
+ )
227
+
228
+ self.num_attention_heads = config.num_attention_heads
229
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
230
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
231
+
232
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
233
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
234
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
235
+
236
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
237
+ self.position_embedding_type = position_embedding_type or getattr(
238
+ config, "position_embedding_type", "absolute"
239
+ )
240
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
241
+ self.max_position_embeddings = config.max_position_embeddings
242
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
243
+
244
+ self.is_decoder = config.is_decoder
245
+
246
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
247
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
248
+ x = x.view(new_x_shape)
249
+ return x.permute(0, 2, 1, 3)
250
+
251
+ def forward(
252
+ self,
253
+ hidden_states: torch.Tensor,
254
+ attention_mask: Optional[torch.FloatTensor] = None,
255
+ head_mask: Optional[torch.FloatTensor] = None,
256
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
257
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
258
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
259
+ output_attentions: Optional[bool] = False,
260
+ ) -> Tuple[torch.Tensor]:
261
+ mixed_query_layer = self.query(hidden_states)
262
+
263
+ # If this is instantiated as a cross-attention module, the keys
264
+ # and values come from an encoder; the attention mask needs to be
265
+ # such that the encoder's padding tokens are not attended to.
266
+ is_cross_attention = encoder_hidden_states is not None
267
+
268
+ if is_cross_attention and past_key_value is not None:
269
+ # reuse k,v, cross_attentions
270
+ key_layer = past_key_value[0]
271
+ value_layer = past_key_value[1]
272
+ attention_mask = encoder_attention_mask
273
+ elif is_cross_attention:
274
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
275
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
276
+ attention_mask = encoder_attention_mask
277
+ elif past_key_value is not None:
278
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
279
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
280
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
281
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
282
+ else:
283
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
284
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
285
+
286
+ query_layer = self.transpose_for_scores(mixed_query_layer)
287
+
288
+ use_cache = past_key_value is not None
289
+ if self.is_decoder:
290
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
291
+ # Further calls to cross_attention layer can then reuse all cross-attention
292
+ # key/value_states (first "if" case)
293
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
294
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
295
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
296
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
297
+ past_key_value = (key_layer, value_layer)
298
+
299
+ # Take the dot product between "query" and "key" to get the raw attention scores.
300
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
301
+
302
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
303
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
304
+ if use_cache:
305
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
306
+ -1, 1
307
+ )
308
+ else:
309
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
310
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
311
+ distance = position_ids_l - position_ids_r
312
+
313
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
314
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
315
+
316
+ if self.position_embedding_type == "relative_key":
317
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
318
+ attention_scores = attention_scores + relative_position_scores
319
+ elif self.position_embedding_type == "relative_key_query":
320
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
321
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
322
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
323
+
324
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
325
+ if attention_mask is not None:
326
+ # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
327
+ attention_scores = attention_scores + attention_mask
328
+
329
+ # Normalize the attention scores to probabilities.
330
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
331
+
332
+ # This is actually dropping out entire tokens to attend to, which might
333
+ # seem a bit unusual, but is taken from the original Transformer paper.
334
+ attention_probs = self.dropout(attention_probs)
335
+
336
+ # Mask heads if we want to
337
+ if head_mask is not None:
338
+ attention_probs = attention_probs * head_mask
339
+
340
+ context_layer = torch.matmul(attention_probs, value_layer)
341
+
342
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
343
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
344
+ context_layer = context_layer.view(new_context_layer_shape)
345
+
346
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
347
+
348
+ if self.is_decoder:
349
+ outputs = outputs + (past_key_value,)
350
+ return outputs
351
+
352
+
353
+ class BertSelfOutput(nn.Module):
354
+ def __init__(self, config):
355
+ super().__init__()
356
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
357
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
358
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
359
+
360
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
361
+ hidden_states = self.dense(hidden_states)
362
+ hidden_states = self.dropout(hidden_states)
363
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
364
+ return hidden_states
365
+
366
+
367
+ class BertAttention(nn.Module):
368
+ def __init__(self, config, position_embedding_type=None):
369
+ super().__init__()
370
+ self.self = BertSelfAttention(config, position_embedding_type=position_embedding_type)
371
+ self.output = BertSelfOutput(config)
372
+ self.pruned_heads = set()
373
+
374
+ def prune_heads(self, heads):
375
+ if len(heads) == 0:
376
+ return
377
+ heads, index = find_pruneable_heads_and_indices(
378
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
379
+ )
380
+
381
+ # Prune linear layers
382
+ self.self.query = prune_linear_layer(self.self.query, index)
383
+ self.self.key = prune_linear_layer(self.self.key, index)
384
+ self.self.value = prune_linear_layer(self.self.value, index)
385
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
386
+
387
+ # Update hyper params and store pruned heads
388
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
389
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
390
+ self.pruned_heads = self.pruned_heads.union(heads)
391
+
392
+ def forward(
393
+ self,
394
+ hidden_states: torch.Tensor,
395
+ attention_mask: Optional[torch.FloatTensor] = None,
396
+ head_mask: Optional[torch.FloatTensor] = None,
397
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
398
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
399
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
400
+ output_attentions: Optional[bool] = False,
401
+ ) -> Tuple[torch.Tensor]:
402
+ self_outputs = self.self(
403
+ hidden_states,
404
+ attention_mask,
405
+ head_mask,
406
+ encoder_hidden_states,
407
+ encoder_attention_mask,
408
+ past_key_value,
409
+ output_attentions,
410
+ )
411
+ attention_output = self.output(self_outputs[0], hidden_states)
412
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
413
+ return outputs
414
+
415
+
416
+ class BertIntermediate(nn.Module):
417
+ def __init__(self, config):
418
+ super().__init__()
419
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
420
+ if isinstance(config.hidden_act, str):
421
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
422
+ else:
423
+ self.intermediate_act_fn = config.hidden_act
424
+
425
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
426
+ hidden_states = self.dense(hidden_states)
427
+ hidden_states = self.intermediate_act_fn(hidden_states)
428
+ return hidden_states
429
+
430
+
431
+ class BertOutput(nn.Module):
432
+ def __init__(self, config):
433
+ super().__init__()
434
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
435
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
436
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
437
+
438
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
439
+ hidden_states = self.dense(hidden_states)
440
+ hidden_states = self.dropout(hidden_states)
441
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
442
+ return hidden_states
443
+
444
+
445
+ class BertLayer(nn.Module):
446
+ def __init__(self, config):
447
+ super().__init__()
448
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
449
+ self.seq_len_dim = 1
450
+ self.attention = BertAttention(config)
451
+ self.is_decoder = config.is_decoder
452
+ self.add_cross_attention = config.add_cross_attention
453
+ if self.add_cross_attention:
454
+ if not self.is_decoder:
455
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
456
+ self.crossattention = BertAttention(config, position_embedding_type="absolute")
457
+ self.intermediate = BertIntermediate(config)
458
+ self.output = BertOutput(config)
459
+
460
+ def forward(
461
+ self,
462
+ hidden_states: torch.Tensor,
463
+ attention_mask: Optional[torch.FloatTensor] = None,
464
+ head_mask: Optional[torch.FloatTensor] = None,
465
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
466
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
467
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
468
+ output_attentions: Optional[bool] = False,
469
+ ) -> Tuple[torch.Tensor]:
470
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
471
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
472
+ self_attention_outputs = self.attention(
473
+ hidden_states,
474
+ attention_mask,
475
+ head_mask,
476
+ output_attentions=output_attentions,
477
+ past_key_value=self_attn_past_key_value,
478
+ )
479
+ attention_output = self_attention_outputs[0]
480
+
481
+ # if decoder, the last output is tuple of self-attn cache
482
+ if self.is_decoder:
483
+ outputs = self_attention_outputs[1:-1]
484
+ present_key_value = self_attention_outputs[-1]
485
+ else:
486
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
487
+
488
+ cross_attn_present_key_value = None
489
+ if self.is_decoder and encoder_hidden_states is not None:
490
+ if not hasattr(self, "crossattention"):
491
+ raise ValueError(
492
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
493
+ " by setting `config.add_cross_attention=True`"
494
+ )
495
+
496
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
497
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
498
+ cross_attention_outputs = self.crossattention(
499
+ attention_output,
500
+ attention_mask,
501
+ head_mask,
502
+ encoder_hidden_states,
503
+ encoder_attention_mask,
504
+ cross_attn_past_key_value,
505
+ output_attentions,
506
+ )
507
+ attention_output = cross_attention_outputs[0]
508
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
509
+
510
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
511
+ cross_attn_present_key_value = cross_attention_outputs[-1]
512
+ present_key_value = present_key_value + cross_attn_present_key_value
513
+
514
+ layer_output = apply_chunking_to_forward(
515
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
516
+ )
517
+ outputs = (layer_output,) + outputs
518
+
519
+ # if decoder, return the attn key/values as the last output
520
+ if self.is_decoder:
521
+ outputs = outputs + (present_key_value,)
522
+
523
+ return outputs
524
+
525
+ def feed_forward_chunk(self, attention_output):
526
+ intermediate_output = self.intermediate(attention_output)
527
+ layer_output = self.output(intermediate_output, attention_output)
528
+ return layer_output
529
+
530
+
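`BertLayer` runs its feed-forward sub-block through `apply_chunking_to_forward`, which slices the sequence dimension into pieces of `config.chunk_size_feed_forward` so the intermediate activations are never materialized for the full sequence at once. Below is a minimal, self-contained sketch of that idea; `chunked_feed_forward` is a hypothetical helper written for illustration (not the library function), and the toy sizes are arbitrary.

```python
# Illustrative sketch of feed-forward chunking (hypothetical helper, not the
# transformers implementation): split the sequence dimension into chunks, run
# the token-wise feed-forward on each chunk, and concatenate the results.
import torch
import torch.nn as nn


def chunked_feed_forward(forward_fn, chunk_size, seq_len_dim, hidden_states):
    if chunk_size == 0:  # chunking disabled: process the whole sequence at once
        return forward_fn(hidden_states)
    chunks = hidden_states.split(chunk_size, dim=seq_len_dim)
    return torch.cat([forward_fn(chunk) for chunk in chunks], dim=seq_len_dim)


# Toy stand-in for BertIntermediate + BertOutput (dropout/LayerNorm omitted).
hidden, intermediate = 16, 64
ff = nn.Sequential(nn.Linear(hidden, intermediate), nn.GELU(), nn.Linear(intermediate, hidden))

x = torch.randn(2, 10, hidden)  # (batch, seq_len, hidden)
full = ff(x)
chunked = chunked_feed_forward(ff, chunk_size=4, seq_len_dim=1, hidden_states=x)
assert torch.allclose(full, chunked, atol=1e-6)  # identical output, smaller peak activations
```

Because the feed-forward is applied token-wise, chunking trades a little speed for lower peak memory without changing the result, which is why the layer can enable it purely from the config.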
531
+ class BertEncoder(nn.Module):
532
+ def __init__(self, config):
533
+ super().__init__()
534
+ self.config = config
535
+ self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
536
+ self.gradient_checkpointing = False
537
+
538
+ def forward(
539
+ self,
540
+ hidden_states: torch.Tensor,
541
+ attention_mask: Optional[torch.FloatTensor] = None,
542
+ head_mask: Optional[torch.FloatTensor] = None,
543
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
544
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
545
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
546
+ use_cache: Optional[bool] = None,
547
+ output_attentions: Optional[bool] = False,
548
+ output_hidden_states: Optional[bool] = False,
549
+ return_dict: Optional[bool] = True,
550
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
551
+ all_hidden_states = () if output_hidden_states else None
552
+ all_self_attentions = () if output_attentions else None
553
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
554
+
555
+ if self.gradient_checkpointing and self.training:
556
+ if use_cache:
557
+ logger.warning_once(
558
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
559
+ )
560
+ use_cache = False
561
+
562
+ next_decoder_cache = () if use_cache else None
563
+ for i, layer_module in enumerate(self.layer):
564
+ if output_hidden_states:
565
+ all_hidden_states = all_hidden_states + (hidden_states,)
566
+
567
+ layer_head_mask = head_mask[i] if head_mask is not None else None
568
+ past_key_value = past_key_values[i] if past_key_values is not None else None
569
+
570
+ if self.gradient_checkpointing and self.training:
571
+ layer_outputs = self._gradient_checkpointing_func(
572
+ layer_module.__call__,
573
+ hidden_states,
574
+ attention_mask,
575
+ layer_head_mask,
576
+ encoder_hidden_states,
577
+ encoder_attention_mask,
578
+ past_key_value,
579
+ output_attentions,
580
+ )
581
+ else:
582
+ layer_outputs = layer_module(
583
+ hidden_states,
584
+ attention_mask,
585
+ layer_head_mask,
586
+ encoder_hidden_states,
587
+ encoder_attention_mask,
588
+ past_key_value,
589
+ output_attentions,
590
+ )
591
+
592
+ hidden_states = layer_outputs[0]
593
+ if use_cache:
594
+ next_decoder_cache += (layer_outputs[-1],)
595
+ if output_attentions:
596
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
597
+ if self.config.add_cross_attention:
598
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
599
+
600
+ if output_hidden_states:
601
+ all_hidden_states = all_hidden_states + (hidden_states,)
602
+
603
+ if not return_dict:
604
+ return tuple(
605
+ v
606
+ for v in [
607
+ hidden_states,
608
+ next_decoder_cache,
609
+ all_hidden_states,
610
+ all_self_attentions,
611
+ all_cross_attentions,
612
+ ]
613
+ if v is not None
614
+ )
615
+ return BaseModelOutputWithPastAndCrossAttentions(
616
+ last_hidden_state=hidden_states,
617
+ past_key_values=next_decoder_cache,
618
+ hidden_states=all_hidden_states,
619
+ attentions=all_self_attentions,
620
+ cross_attentions=all_cross_attentions,
621
+ )
622
+
623
+
624
+ class BertPooler(nn.Module):
625
+ def __init__(self, config):
626
+ super().__init__()
627
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
628
+ self.activation = nn.Tanh()
629
+
630
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
631
+ # We "pool" the model by simply taking the hidden state corresponding
632
+ # to the first token.
633
+ first_token_tensor = hidden_states[:, 0]
634
+ pooled_output = self.dense(first_token_tensor)
635
+ pooled_output = self.activation(pooled_output)
636
+ return pooled_output
637
+
638
+
639
+ class BertPredictionHeadTransform(nn.Module):
640
+ def __init__(self, config):
641
+ super().__init__()
642
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
643
+ if isinstance(config.hidden_act, str):
644
+ self.transform_act_fn = ACT2FN[config.hidden_act]
645
+ else:
646
+ self.transform_act_fn = config.hidden_act
647
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
648
+
649
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
650
+ hidden_states = self.dense(hidden_states)
651
+ hidden_states = self.transform_act_fn(hidden_states)
652
+ hidden_states = self.LayerNorm(hidden_states)
653
+ return hidden_states
654
+
655
+
656
+ class BertLMPredictionHead(nn.Module):
657
+ def __init__(self, config):
658
+ super().__init__()
659
+ self.transform = BertPredictionHeadTransform(config)
660
+
661
+ # The output weights are the same as the input embeddings, but there is
662
+ # an output-only bias for each token.
663
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
664
+
665
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
666
+
667
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
668
+ self.decoder.bias = self.bias
669
+
670
+ def forward(self, hidden_states):
671
+ hidden_states = self.transform(hidden_states)
672
+ hidden_states = self.decoder(hidden_states)
673
+ return hidden_states
674
+
675
+
676
+ class BertOnlyMLMHead(nn.Module):
677
+ def __init__(self, config):
678
+ super().__init__()
679
+ self.predictions = BertLMPredictionHead(config)
680
+
681
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
682
+ prediction_scores = self.predictions(sequence_output)
683
+ return prediction_scores
684
+
685
+
686
+ class BertOnlyNSPHead(nn.Module):
687
+ def __init__(self, config):
688
+ super().__init__()
689
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
690
+
691
+ def forward(self, pooled_output):
692
+ seq_relationship_score = self.seq_relationship(pooled_output)
693
+ return seq_relationship_score
694
+
695
+
696
+ class BertPreTrainingHeads(nn.Module):
697
+ def __init__(self, config):
698
+ super().__init__()
699
+ self.predictions = BertLMPredictionHead(config)
700
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
701
+
702
+ def forward(self, sequence_output, pooled_output):
703
+ prediction_scores = self.predictions(sequence_output)
704
+ seq_relationship_score = self.seq_relationship(pooled_output)
705
+ return prediction_scores, seq_relationship_score
706
+
707
+
708
+ class BertPreTrainedModel(PreTrainedModel):
709
+ """
710
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
711
+ models.
712
+ """
713
+
714
+ config_class = BertConfig
715
+ load_tf_weights = load_tf_weights_in_bert
716
+ base_model_prefix = "bert"
717
+ supports_gradient_checkpointing = True
718
+
719
+ def _init_weights(self, module):
720
+ """Initialize the weights"""
721
+ if isinstance(module, nn.Linear):
722
+ # Slightly different from the TF version which uses truncated_normal for initialization
723
+ # cf https://github.com/pytorch/pytorch/pull/5617
724
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
725
+ if module.bias is not None:
726
+ module.bias.data.zero_()
727
+ elif isinstance(module, nn.Embedding):
728
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
729
+ if module.padding_idx is not None:
730
+ module.weight.data[module.padding_idx].zero_()
731
+ elif isinstance(module, nn.LayerNorm):
732
+ module.bias.data.zero_()
733
+ module.weight.data.fill_(1.0)
734
+
735
+
736
+ @dataclass
737
+ class BertForPreTrainingOutput(ModelOutput):
738
+ """
739
+ Output type of [`BertForPreTraining`].
740
+
741
+ Args:
742
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
743
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
744
+ (classification) loss.
745
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
746
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
747
+ seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
748
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
749
+ before SoftMax).
750
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
751
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
752
+ shape `(batch_size, sequence_length, hidden_size)`.
753
+
754
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
755
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
756
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
757
+ sequence_length)`.
758
+
759
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
760
+ heads.
761
+ """
762
+
763
+ loss: Optional[torch.FloatTensor] = None
764
+ prediction_logits: torch.FloatTensor = None
765
+ seq_relationship_logits: torch.FloatTensor = None
766
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
767
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
768
+
769
+
770
+ BERT_START_DOCSTRING = r"""
771
+
772
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
773
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
774
+ etc.)
775
+
776
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
777
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
778
+ and behavior.
779
+
780
+ Parameters:
781
+ config ([`BertConfig`]): Model configuration class with all the parameters of the model.
782
+ Initializing with a config file does not load the weights associated with the model, only the
783
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
784
+ """
785
+
786
+ BERT_INPUTS_DOCSTRING = r"""
787
+ Args:
788
+ input_ids (`torch.LongTensor` of shape `({0})`):
789
+ Indices of input sequence tokens in the vocabulary.
790
+
791
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
792
+ [`PreTrainedTokenizer.__call__`] for details.
793
+
794
+ [What are input IDs?](../glossary#input-ids)
795
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
796
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
797
+
798
+ - 1 for tokens that are **not masked**,
799
+ - 0 for tokens that are **masked**.
800
+
801
+ [What are attention masks?](../glossary#attention-mask)
802
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
803
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
804
+ 1]`:
805
+
806
+ - 0 corresponds to a *sentence A* token,
807
+ - 1 corresponds to a *sentence B* token.
808
+
809
+ [What are token type IDs?](../glossary#token-type-ids)
810
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
811
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
812
+ config.max_position_embeddings - 1]`.
813
+
814
+ [What are position IDs?](../glossary#position-ids)
815
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
816
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
817
+
818
+ - 1 indicates the head is **not masked**,
819
+ - 0 indicates the head is **masked**.
820
+
821
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
822
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
823
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
824
+ model's internal embedding lookup matrix.
825
+ output_attentions (`bool`, *optional*):
826
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
827
+ tensors for more detail.
828
+ output_hidden_states (`bool`, *optional*):
829
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
830
+ more detail.
831
+ return_dict (`bool`, *optional*):
832
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
833
+ """
834
+
835
+
836
+ @add_start_docstrings(
837
+ "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
838
+ BERT_START_DOCSTRING,
839
+ )
840
+ class BertModel(BertPreTrainedModel):
841
+ """
842
+
843
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
844
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
845
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
846
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
847
+
848
+ To behave as a decoder, the model needs to be initialized with the `is_decoder` argument of the configuration set
849
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` and
850
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
851
+ """
852
+
853
+ def __init__(self, config, add_pooling_layer=True):
854
+ super().__init__(config)
855
+ self.config = config
856
+
857
+ self.embeddings = BertEmbeddings(config)
858
+ self.encoder = BertEncoder(config)
859
+
860
+ self.pooler = BertPooler(config) if add_pooling_layer else None
861
+
862
+ # Initialize weights and apply final processing
863
+ self.post_init()
864
+
865
+ def get_input_embeddings(self):
866
+ return self.embeddings.word_embeddings
867
+
868
+ def set_input_embeddings(self, value):
869
+ self.embeddings.word_embeddings = value
870
+
871
+ def _prune_heads(self, heads_to_prune):
872
+ """
873
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
874
+ class PreTrainedModel
875
+ """
876
+ for layer, heads in heads_to_prune.items():
877
+ self.encoder.layer[layer].attention.prune_heads(heads)
878
+
879
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
880
+ @add_code_sample_docstrings(
881
+ checkpoint=_CHECKPOINT_FOR_DOC,
882
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
883
+ config_class=_CONFIG_FOR_DOC,
884
+ )
885
+ def forward(
886
+ self,
887
+ input_ids: Optional[torch.Tensor] = None,
888
+ attention_mask: Optional[torch.Tensor] = None,
889
+ token_type_ids: Optional[torch.Tensor] = None,
890
+ position_ids: Optional[torch.Tensor] = None,
891
+ head_mask: Optional[torch.Tensor] = None,
892
+ inputs_embeds: Optional[torch.Tensor] = None,
893
+ encoder_hidden_states: Optional[torch.Tensor] = None,
894
+ encoder_attention_mask: Optional[torch.Tensor] = None,
895
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
896
+ use_cache: Optional[bool] = None,
897
+ output_attentions: Optional[bool] = None,
898
+ output_hidden_states: Optional[bool] = None,
899
+ return_dict: Optional[bool] = None,
900
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
901
+ r"""
902
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
903
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
904
+ the model is configured as a decoder.
905
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
906
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
907
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
908
+
909
+ - 1 for tokens that are **not masked**,
910
+ - 0 for tokens that are **masked**.
911
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
912
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
913
+
914
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
915
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
916
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
917
+ use_cache (`bool`, *optional*):
918
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
919
+ `past_key_values`).
920
+ """
921
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
922
+ output_hidden_states = (
923
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
924
+ )
925
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
926
+
927
+ if self.config.is_decoder:
928
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
929
+ else:
930
+ use_cache = False
931
+
932
+ if input_ids is not None and inputs_embeds is not None:
933
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
934
+ elif input_ids is not None:
935
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
936
+ input_shape = input_ids.size()
937
+ elif inputs_embeds is not None:
938
+ input_shape = inputs_embeds.size()[:-1]
939
+ else:
940
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
941
+
942
+ batch_size, seq_length = input_shape
943
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
944
+
945
+ # past_key_values_length
946
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
947
+
948
+ if attention_mask is None:
949
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
950
+
951
+ if token_type_ids is None:
952
+ if hasattr(self.embeddings, "token_type_ids"):
953
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
954
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
955
+ token_type_ids = buffered_token_type_ids_expanded
956
+ else:
957
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
958
+
959
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
960
+ # ourselves in which case we just need to make it broadcastable to all heads.
961
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
962
+
963
+ # If a 2D or 3D attention mask is provided for the cross-attention
964
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
965
+ if self.config.is_decoder and encoder_hidden_states is not None:
966
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
967
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
968
+ if encoder_attention_mask is None:
969
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
970
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
971
+ else:
972
+ encoder_extended_attention_mask = None
973
+
974
+ # Prepare head mask if needed
975
+ # 1.0 in head_mask indicate we keep the head
976
+ # attention_probs has shape bsz x n_heads x N x N
977
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
978
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
979
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
980
+
981
+ embedding_output = self.embeddings(
982
+ input_ids=input_ids,
983
+ position_ids=position_ids,
984
+ token_type_ids=token_type_ids,
985
+ inputs_embeds=inputs_embeds,
986
+ past_key_values_length=past_key_values_length,
987
+ )
988
+ encoder_outputs = self.encoder(
989
+ embedding_output,
990
+ attention_mask=extended_attention_mask,
991
+ head_mask=head_mask,
992
+ encoder_hidden_states=encoder_hidden_states,
993
+ encoder_attention_mask=encoder_extended_attention_mask,
994
+ past_key_values=past_key_values,
995
+ use_cache=use_cache,
996
+ output_attentions=output_attentions,
997
+ output_hidden_states=output_hidden_states,
998
+ return_dict=return_dict,
999
+ )
1000
+ sequence_output = encoder_outputs[0]
1001
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
1002
+
1003
+ if not return_dict:
1004
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
1005
+
1006
+ return BaseModelOutputWithPoolingAndCrossAttentions(
1007
+ last_hidden_state=sequence_output,
1008
+ pooler_output=pooled_output,
1009
+ past_key_values=encoder_outputs.past_key_values,
1010
+ hidden_states=encoder_outputs.hidden_states,
1011
+ attentions=encoder_outputs.attentions,
1012
+ cross_attentions=encoder_outputs.cross_attentions,
1013
+ )
1014
+
1015
+
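The class docstring above describes the two modes of `BertModel`: a bi-directional encoder by default, and a decoder with cross-attention when `is_decoder` and `add_cross_attention` are set. A minimal sketch of both, using tiny randomly initialized configs (the sizes are arbitrary and no pretrained weights are loaded):

```python
# Sketch of BertModel as encoder vs. decoder; tiny random configs, illustrative only.
import torch
from transformers import BertConfig, BertModel

# Plain encoder (default): bi-directional self-attention, no cache is returned.
enc_config = BertConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128)
encoder = BertModel(enc_config)
enc_out = encoder(input_ids=torch.randint(0, enc_config.vocab_size, (1, 8)))
print(enc_out.last_hidden_state.shape)  # torch.Size([1, 8, 64])

# Decoder with cross-attention: requires is_decoder=True and add_cross_attention=True,
# and takes the encoder's hidden states as an extra input to the forward pass.
dec_config = BertConfig(
    hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128,
    is_decoder=True, add_cross_attention=True,
)
decoder = BertModel(dec_config)
dec_out = decoder(
    input_ids=torch.randint(0, dec_config.vocab_size, (1, 4)),
    encoder_hidden_states=enc_out.last_hidden_state,
    use_cache=True,
)
print(len(dec_out.past_key_values))  # 2: one (self-attn + cross-attn) cache tuple per layer
```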
1016
+ @add_start_docstrings(
1017
+ """
1018
+ Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
1019
+ sentence prediction (classification)` head.
1020
+ """,
1021
+ BERT_START_DOCSTRING,
1022
+ )
1023
+ class BertForPreTraining(BertPreTrainedModel):
1024
+ _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"]
1025
+
1026
+ def __init__(self, config):
1027
+ super().__init__(config)
1028
+
1029
+ self.bert = BertModel(config)
1030
+ self.cls = BertPreTrainingHeads(config)
1031
+
1032
+ # Initialize weights and apply final processing
1033
+ self.post_init()
1034
+
1035
+ def get_output_embeddings(self):
1036
+ return self.cls.predictions.decoder
1037
+
1038
+ def set_output_embeddings(self, new_embeddings):
1039
+ self.cls.predictions.decoder = new_embeddings
1040
+
1041
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1042
+ @replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1043
+ def forward(
1044
+ self,
1045
+ input_ids: Optional[torch.Tensor] = None,
1046
+ attention_mask: Optional[torch.Tensor] = None,
1047
+ token_type_ids: Optional[torch.Tensor] = None,
1048
+ position_ids: Optional[torch.Tensor] = None,
1049
+ head_mask: Optional[torch.Tensor] = None,
1050
+ inputs_embeds: Optional[torch.Tensor] = None,
1051
+ labels: Optional[torch.Tensor] = None,
1052
+ next_sentence_label: Optional[torch.Tensor] = None,
1053
+ output_attentions: Optional[bool] = None,
1054
+ output_hidden_states: Optional[bool] = None,
1055
+ return_dict: Optional[bool] = None,
1056
+ ) -> Union[Tuple[torch.Tensor], BertForPreTrainingOutput]:
1057
+ r"""
1058
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1059
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1060
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
1061
+ the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1062
+ next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1063
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
1064
+ pair (see `input_ids` docstring). Indices should be in `[0, 1]`:
1065
+
1066
+ - 0 indicates sequence B is a continuation of sequence A,
1067
+ - 1 indicates sequence B is a random sequence.
1068
+ kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
1069
+ Used to hide legacy arguments that have been deprecated.
1070
+
1071
+ Returns:
1072
+
1073
+ Example:
1074
+
1075
+ ```python
1076
+ >>> from transformers import AutoTokenizer, BertForPreTraining
1077
+ >>> import torch
1078
+
1079
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
1080
+ >>> model = BertForPreTraining.from_pretrained("google-bert/bert-base-uncased")
1081
+
1082
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1083
+ >>> outputs = model(**inputs)
1084
+
1085
+ >>> prediction_logits = outputs.prediction_logits
1086
+ >>> seq_relationship_logits = outputs.seq_relationship_logits
1087
+ ```
1088
+ """
1089
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1090
+
1091
+ outputs = self.bert(
1092
+ input_ids,
1093
+ attention_mask=attention_mask,
1094
+ token_type_ids=token_type_ids,
1095
+ position_ids=position_ids,
1096
+ head_mask=head_mask,
1097
+ inputs_embeds=inputs_embeds,
1098
+ output_attentions=output_attentions,
1099
+ output_hidden_states=output_hidden_states,
1100
+ return_dict=return_dict,
1101
+ )
1102
+
1103
+ sequence_output, pooled_output = outputs[:2]
1104
+ prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
1105
+
1106
+ total_loss = None
1107
+ if labels is not None and next_sentence_label is not None:
1108
+ loss_fct = CrossEntropyLoss()
1109
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1110
+ next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
1111
+ total_loss = masked_lm_loss + next_sentence_loss
1112
+
1113
+ if not return_dict:
1114
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
1115
+ return ((total_loss,) + output) if total_loss is not None else output
1116
+
1117
+ return BertForPreTrainingOutput(
1118
+ loss=total_loss,
1119
+ prediction_logits=prediction_scores,
1120
+ seq_relationship_logits=seq_relationship_score,
1121
+ hidden_states=outputs.hidden_states,
1122
+ attentions=outputs.attentions,
1123
+ )
1124
+
1125
+
1126
+ @add_start_docstrings(
1127
+ """Bert Model with a `language modeling` head on top for CLM fine-tuning.""", BERT_START_DOCSTRING
1128
+ )
1129
+ class BertLMHeadModel(BertPreTrainedModel):
1130
+ _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]
1131
+
1132
+ def __init__(self, config):
1133
+ super().__init__(config)
1134
+
1135
+ if not config.is_decoder:
1136
+ logger.warning("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`")
1137
+
1138
+ self.bert = BertModel(config, add_pooling_layer=False)
1139
+ self.cls = BertOnlyMLMHead(config)
1140
+
1141
+ # Initialize weights and apply final processing
1142
+ self.post_init()
1143
+
1144
+ def get_output_embeddings(self):
1145
+ return self.cls.predictions.decoder
1146
+
1147
+ def set_output_embeddings(self, new_embeddings):
1148
+ self.cls.predictions.decoder = new_embeddings
1149
+
1150
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1151
+ @add_code_sample_docstrings(
1152
+ checkpoint=_CHECKPOINT_FOR_DOC,
1153
+ output_type=CausalLMOutputWithCrossAttentions,
1154
+ config_class=_CONFIG_FOR_DOC,
1155
+ )
1156
+ def forward(
1157
+ self,
1158
+ input_ids: Optional[torch.Tensor] = None,
1159
+ attention_mask: Optional[torch.Tensor] = None,
1160
+ token_type_ids: Optional[torch.Tensor] = None,
1161
+ position_ids: Optional[torch.Tensor] = None,
1162
+ head_mask: Optional[torch.Tensor] = None,
1163
+ inputs_embeds: Optional[torch.Tensor] = None,
1164
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1165
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1166
+ labels: Optional[torch.Tensor] = None,
1167
+ past_key_values: Optional[List[torch.Tensor]] = None,
1168
+ use_cache: Optional[bool] = None,
1169
+ output_attentions: Optional[bool] = None,
1170
+ output_hidden_states: Optional[bool] = None,
1171
+ return_dict: Optional[bool] = None,
1172
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
1173
+ r"""
1174
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1175
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1176
+ the model is configured as a decoder.
1177
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1178
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1179
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1180
+
1181
+ - 1 for tokens that are **not masked**,
1182
+ - 0 for tokens that are **masked**.
1183
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1184
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1185
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
1186
+ ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1187
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1188
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1189
+
1190
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1191
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1192
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1193
+ use_cache (`bool`, *optional*):
1194
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1195
+ `past_key_values`).
1196
+ """
1197
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1198
+ if labels is not None:
1199
+ use_cache = False
1200
+
1201
+ outputs = self.bert(
1202
+ input_ids,
1203
+ attention_mask=attention_mask,
1204
+ token_type_ids=token_type_ids,
1205
+ position_ids=position_ids,
1206
+ head_mask=head_mask,
1207
+ inputs_embeds=inputs_embeds,
1208
+ encoder_hidden_states=encoder_hidden_states,
1209
+ encoder_attention_mask=encoder_attention_mask,
1210
+ past_key_values=past_key_values,
1211
+ use_cache=use_cache,
1212
+ output_attentions=output_attentions,
1213
+ output_hidden_states=output_hidden_states,
1214
+ return_dict=return_dict,
1215
+ )
1216
+
1217
+ sequence_output = outputs[0]
1218
+ prediction_scores = self.cls(sequence_output)
1219
+
1220
+ lm_loss = None
1221
+ if labels is not None:
1222
+ # we are doing next-token prediction; shift prediction scores and input ids by one
1223
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
1224
+ labels = labels[:, 1:].contiguous()
1225
+ loss_fct = CrossEntropyLoss()
1226
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1227
+
1228
+ if not return_dict:
1229
+ output = (prediction_scores,) + outputs[2:]
1230
+ return ((lm_loss,) + output) if lm_loss is not None else output
1231
+
1232
+ return CausalLMOutputWithCrossAttentions(
1233
+ loss=lm_loss,
1234
+ logits=prediction_scores,
1235
+ past_key_values=outputs.past_key_values,
1236
+ hidden_states=outputs.hidden_states,
1237
+ attentions=outputs.attentions,
1238
+ cross_attentions=outputs.cross_attentions,
1239
+ )
1240
+
1241
+ def prepare_inputs_for_generation(
1242
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=True, **model_kwargs
1243
+ ):
1244
+ input_shape = input_ids.shape
1245
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1246
+ if attention_mask is None:
1247
+ attention_mask = input_ids.new_ones(input_shape)
1248
+
1249
+ # cut decoder_input_ids if past_key_values is used
1250
+ if past_key_values is not None:
1251
+ past_length = past_key_values[0][0].shape[2]
1252
+
1253
+ # Some generation methods already pass only the last input ID
1254
+ if input_ids.shape[1] > past_length:
1255
+ remove_prefix_length = past_length
1256
+ else:
1257
+ # Default to old behavior: keep only final ID
1258
+ remove_prefix_length = input_ids.shape[1] - 1
1259
+
1260
+ input_ids = input_ids[:, remove_prefix_length:]
1261
+
1262
+ return {
1263
+ "input_ids": input_ids,
1264
+ "attention_mask": attention_mask,
1265
+ "past_key_values": past_key_values,
1266
+ "use_cache": use_cache,
1267
+ }
1268
+
1269
+ def _reorder_cache(self, past_key_values, beam_idx):
1270
+ reordered_past = ()
1271
+ for layer_past in past_key_values:
1272
+ reordered_past += (
1273
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1274
+ )
1275
+ return reordered_past
1276
+
1277
+
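`BertLMHeadModel` is the causal variant: `prepare_inputs_for_generation` trims the prompt to the last token once `past_key_values` exist, and `_reorder_cache` keeps the cache aligned during beam search. A small sketch of driving it through `generate` with a randomly initialized config (the generated ids are meaningless; the point is the caching path):

```python
# Sketch of causal generation with BertLMHeadModel; tiny random config, illustrative only.
import torch
from transformers import BertConfig, BertLMHeadModel

config = BertConfig(
    hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128, is_decoder=True
)
model = BertLMHeadModel(config)
model.eval()

prompt = torch.randint(0, config.vocab_size, (1, 5))
with torch.no_grad():
    generated = model.generate(prompt, max_new_tokens=5)  # greedy decoding, cache reused at each step
print(generated.shape)  # torch.Size([1, 10]) -- the prompt plus 5 new tokens
```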
1278
+ @add_start_docstrings("""Bert Model with a `language modeling` head on top.""", BERT_START_DOCSTRING)
1279
+ class BertForMaskedLM(BertPreTrainedModel):
1280
+ _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"]
1281
+
1282
+ def __init__(self, config):
1283
+ super().__init__(config)
1284
+
1285
+ if config.is_decoder:
1286
+ logger.warning(
1287
+ "If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
1288
+ "bi-directional self-attention."
1289
+ )
1290
+
1291
+ self.bert = BertModel(config, add_pooling_layer=False)
1292
+ self.cls = BertOnlyMLMHead(config)
1293
+
1294
+ # Initialize weights and apply final processing
1295
+ self.post_init()
1296
+
1297
+ def get_output_embeddings(self):
1298
+ return self.cls.predictions.decoder
1299
+
1300
+ def set_output_embeddings(self, new_embeddings):
1301
+ self.cls.predictions.decoder = new_embeddings
1302
+
1303
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1304
+ @add_code_sample_docstrings(
1305
+ checkpoint=_CHECKPOINT_FOR_DOC,
1306
+ output_type=MaskedLMOutput,
1307
+ config_class=_CONFIG_FOR_DOC,
1308
+ expected_output="'paris'",
1309
+ expected_loss=0.88,
1310
+ )
1311
+ def forward(
1312
+ self,
1313
+ input_ids: Optional[torch.Tensor] = None,
1314
+ attention_mask: Optional[torch.Tensor] = None,
1315
+ token_type_ids: Optional[torch.Tensor] = None,
1316
+ position_ids: Optional[torch.Tensor] = None,
1317
+ head_mask: Optional[torch.Tensor] = None,
1318
+ inputs_embeds: Optional[torch.Tensor] = None,
1319
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1320
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1321
+ labels: Optional[torch.Tensor] = None,
1322
+ output_attentions: Optional[bool] = None,
1323
+ output_hidden_states: Optional[bool] = None,
1324
+ return_dict: Optional[bool] = None,
1325
+ ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
1326
+ r"""
1327
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1328
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1329
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
1330
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1331
+ """
1332
+
1333
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1334
+
1335
+ outputs = self.bert(
1336
+ input_ids,
1337
+ attention_mask=attention_mask,
1338
+ token_type_ids=token_type_ids,
1339
+ position_ids=position_ids,
1340
+ head_mask=head_mask,
1341
+ inputs_embeds=inputs_embeds,
1342
+ encoder_hidden_states=encoder_hidden_states,
1343
+ encoder_attention_mask=encoder_attention_mask,
1344
+ output_attentions=output_attentions,
1345
+ output_hidden_states=output_hidden_states,
1346
+ return_dict=return_dict,
1347
+ )
1348
+
1349
+ sequence_output = outputs[0]
1350
+ prediction_scores = self.cls(sequence_output)
1351
+
1352
+ masked_lm_loss = None
1353
+ if labels is not None:
1354
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
1355
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1356
+
1357
+ if not return_dict:
1358
+ output = (prediction_scores,) + outputs[2:]
1359
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1360
+
1361
+ return MaskedLMOutput(
1362
+ loss=masked_lm_loss,
1363
+ logits=prediction_scores,
1364
+ hidden_states=outputs.hidden_states,
1365
+ attentions=outputs.attentions,
1366
+ )
1367
+
1368
+ def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
1369
+ input_shape = input_ids.shape
1370
+ effective_batch_size = input_shape[0]
1371
+
1372
+ # add a dummy token
1373
+ if self.config.pad_token_id is None:
1374
+ raise ValueError("The PAD token should be defined for generation")
1375
+
1376
+ attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
1377
+ dummy_token = torch.full(
1378
+ (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
1379
+ )
1380
+ input_ids = torch.cat([input_ids, dummy_token], dim=1)
1381
+
1382
+ return {"input_ids": input_ids, "attention_mask": attention_mask}
1383
+
1384
+
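`BertForMaskedLM` returns per-position vocabulary logits, so the usual decode step is to read the logits at the `[MASK]` position and take the arg-max, which is where the `'paris'` expected output in the docstring metadata above comes from. A short sketch using the pretrained checkpoint named in those docstrings:

```python
# Sketch of the standard fill-mask decode step for BertForMaskedLM.
import torch
from transformers import AutoTokenizer, BertForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = BertForMaskedLM.from_pretrained("google-bert/bert-base-uncased")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (batch, seq_len, vocab_size)

mask_positions = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
predicted_ids = logits[0, mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_ids))  # "paris" for this checkpoint
```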
1385
+ @add_start_docstrings(
1386
+ """Bert Model with a `next sentence prediction (classification)` head on top.""",
1387
+ BERT_START_DOCSTRING,
1388
+ )
1389
+ class BertForNextSentencePrediction(BertPreTrainedModel):
1390
+ def __init__(self, config):
1391
+ super().__init__(config)
1392
+
1393
+ self.bert = BertModel(config)
1394
+ self.cls = BertOnlyNSPHead(config)
1395
+
1396
+ # Initialize weights and apply final processing
1397
+ self.post_init()
1398
+
1399
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1400
+ @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
1401
+ def forward(
1402
+ self,
1403
+ input_ids: Optional[torch.Tensor] = None,
1404
+ attention_mask: Optional[torch.Tensor] = None,
1405
+ token_type_ids: Optional[torch.Tensor] = None,
1406
+ position_ids: Optional[torch.Tensor] = None,
1407
+ head_mask: Optional[torch.Tensor] = None,
1408
+ inputs_embeds: Optional[torch.Tensor] = None,
1409
+ labels: Optional[torch.Tensor] = None,
1410
+ output_attentions: Optional[bool] = None,
1411
+ output_hidden_states: Optional[bool] = None,
1412
+ return_dict: Optional[bool] = None,
1413
+ **kwargs,
1414
+ ) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]:
1415
+ r"""
1416
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1417
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
1418
+ (see `input_ids` docstring). Indices should be in `[0, 1]`:
1419
+
1420
+ - 0 indicates sequence B is a continuation of sequence A,
1421
+ - 1 indicates sequence B is a random sequence.
1422
+
1423
+ Returns:
1424
+
1425
+ Example:
1426
+
1427
+ ```python
1428
+ >>> from transformers import AutoTokenizer, BertForNextSentencePrediction
1429
+ >>> import torch
1430
+
1431
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
1432
+ >>> model = BertForNextSentencePrediction.from_pretrained("google-bert/bert-base-uncased")
1433
+
1434
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1435
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
1436
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
1437
+
1438
+ >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
1439
+ >>> logits = outputs.logits
1440
+ >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
1441
+ ```
1442
+ """
1443
+
1444
+ if "next_sentence_label" in kwargs:
1445
+ warnings.warn(
1446
+ "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
1447
+ " `labels` instead.",
1448
+ FutureWarning,
1449
+ )
1450
+ labels = kwargs.pop("next_sentence_label")
1451
+
1452
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1453
+
1454
+ outputs = self.bert(
1455
+ input_ids,
1456
+ attention_mask=attention_mask,
1457
+ token_type_ids=token_type_ids,
1458
+ position_ids=position_ids,
1459
+ head_mask=head_mask,
1460
+ inputs_embeds=inputs_embeds,
1461
+ output_attentions=output_attentions,
1462
+ output_hidden_states=output_hidden_states,
1463
+ return_dict=return_dict,
1464
+ )
1465
+
1466
+ pooled_output = outputs[1]
1467
+
1468
+ seq_relationship_scores = self.cls(pooled_output)
1469
+
1470
+ next_sentence_loss = None
1471
+ if labels is not None:
1472
+ loss_fct = CrossEntropyLoss()
1473
+ next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
1474
+
1475
+ if not return_dict:
1476
+ output = (seq_relationship_scores,) + outputs[2:]
1477
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
1478
+
1479
+ return NextSentencePredictorOutput(
1480
+ loss=next_sentence_loss,
1481
+ logits=seq_relationship_scores,
1482
+ hidden_states=outputs.hidden_states,
1483
+ attentions=outputs.attentions,
1484
+ )
1485
+
1486
+
1487
+ @add_start_docstrings(
1488
+ """
1489
+ Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1490
+ output) e.g. for GLUE tasks.
1491
+ """,
1492
+ BERT_START_DOCSTRING,
1493
+ )
1494
+ class BertForSequenceClassification(BertPreTrainedModel):
1495
+ def __init__(self, config):
1496
+ super().__init__(config)
1497
+ self.num_labels = config.num_labels
1498
+ self.config = config
1499
+
1500
+ self.bert = BertModel(config)
1501
+ classifier_dropout = (
1502
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1503
+ )
1504
+ self.dropout = nn.Dropout(classifier_dropout)
1505
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1506
+
1507
+ # Initialize weights and apply final processing
1508
+ self.post_init()
1509
+
1510
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1511
+ @add_code_sample_docstrings(
1512
+ checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION,
1513
+ output_type=SequenceClassifierOutput,
1514
+ config_class=_CONFIG_FOR_DOC,
1515
+ expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
1516
+ expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
1517
+ )
1518
+ def forward(
1519
+ self,
1520
+ input_ids: Optional[torch.Tensor] = None,
1521
+ attention_mask: Optional[torch.Tensor] = None,
1522
+ token_type_ids: Optional[torch.Tensor] = None,
1523
+ position_ids: Optional[torch.Tensor] = None,
1524
+ head_mask: Optional[torch.Tensor] = None,
1525
+ inputs_embeds: Optional[torch.Tensor] = None,
1526
+ labels: Optional[torch.Tensor] = None,
1527
+ output_attentions: Optional[bool] = None,
1528
+ output_hidden_states: Optional[bool] = None,
1529
+ return_dict: Optional[bool] = None,
1530
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
1531
+ r"""
1532
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1533
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1534
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1535
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1536
+ """
1537
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1538
+
1539
+ outputs = self.bert(
1540
+ input_ids,
1541
+ attention_mask=attention_mask,
1542
+ token_type_ids=token_type_ids,
1543
+ position_ids=position_ids,
1544
+ head_mask=head_mask,
1545
+ inputs_embeds=inputs_embeds,
1546
+ output_attentions=output_attentions,
1547
+ output_hidden_states=output_hidden_states,
1548
+ return_dict=return_dict,
1549
+ )
1550
+
1551
+ pooled_output = outputs[1]
1552
+
1553
+ pooled_output = self.dropout(pooled_output)
1554
+ logits = self.classifier(pooled_output)
1555
+
1556
+ loss = None
1557
+ if labels is not None:
1558
+ if self.config.problem_type is None:
1559
+ if self.num_labels == 1:
1560
+ self.config.problem_type = "regression"
1561
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1562
+ self.config.problem_type = "single_label_classification"
1563
+ else:
1564
+ self.config.problem_type = "multi_label_classification"
1565
+
1566
+ if self.config.problem_type == "regression":
1567
+ loss_fct = MSELoss()
1568
+ if self.num_labels == 1:
1569
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1570
+ else:
1571
+ loss = loss_fct(logits, labels)
1572
+ elif self.config.problem_type == "single_label_classification":
1573
+ loss_fct = CrossEntropyLoss()
1574
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1575
+ elif self.config.problem_type == "multi_label_classification":
1576
+ loss_fct = BCEWithLogitsLoss()
1577
+ loss = loss_fct(logits, labels)
1578
+ if not return_dict:
1579
+ output = (logits,) + outputs[2:]
1580
+ return ((loss,) + output) if loss is not None else output
1581
+
1582
+ return SequenceClassifierOutput(
1583
+ loss=loss,
1584
+ logits=logits,
1585
+ hidden_states=outputs.hidden_states,
1586
+ attentions=outputs.attentions,
1587
+ )
1588
+
1589
+
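The loss branch in `BertForSequenceClassification.forward` picks `problem_type` from `num_labels` and the label dtype the first time labels are seen: one float target per example means regression (MSE), integer class indices mean single-label classification (cross-entropy), and float multi-hot targets mean multi-label classification (BCE with logits). A sketch of two of those branches with tiny randomly initialized configs (arbitrary sizes, illustrative only):

```python
# Sketch of problem_type inference in BertForSequenceClassification.
import torch
from transformers import BertConfig, BertForSequenceClassification

common = dict(hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128)
input_ids = torch.randint(0, 30522, (2, 8))  # 30522 is the default BERT vocab size

# num_labels > 1 with integer labels -> single-label classification (CrossEntropyLoss).
clf = BertForSequenceClassification(BertConfig(num_labels=3, **common))
out = clf(input_ids=input_ids, labels=torch.tensor([0, 2]))
print(clf.config.problem_type, out.loss.item())  # single_label_classification

# num_labels == 1 with float targets -> regression (MSELoss).
reg = BertForSequenceClassification(BertConfig(num_labels=1, **common))
out = reg(input_ids=input_ids, labels=torch.tensor([[0.5], [1.5]]))
print(reg.config.problem_type, out.loss.item())  # regression
```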
1590
+ @add_start_docstrings(
1591
+ """
1592
+ Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1593
+ softmax) e.g. for RocStories/SWAG tasks.
1594
+ """,
1595
+ BERT_START_DOCSTRING,
1596
+ )
1597
+ class BertForMultipleChoice(BertPreTrainedModel):
1598
+ def __init__(self, config):
1599
+ super().__init__(config)
1600
+
1601
+ self.bert = BertModel(config)
1602
+ classifier_dropout = (
1603
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1604
+ )
1605
+ self.dropout = nn.Dropout(classifier_dropout)
1606
+ self.classifier = nn.Linear(config.hidden_size, 1)
1607
+
1608
+ # Initialize weights and apply final processing
1609
+ self.post_init()
1610
+
1611
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1612
+ @add_code_sample_docstrings(
1613
+ checkpoint=_CHECKPOINT_FOR_DOC,
1614
+ output_type=MultipleChoiceModelOutput,
1615
+ config_class=_CONFIG_FOR_DOC,
1616
+ )
1617
+ def forward(
1618
+ self,
1619
+ input_ids: Optional[torch.Tensor] = None,
1620
+ attention_mask: Optional[torch.Tensor] = None,
1621
+ token_type_ids: Optional[torch.Tensor] = None,
1622
+ position_ids: Optional[torch.Tensor] = None,
1623
+ head_mask: Optional[torch.Tensor] = None,
1624
+ inputs_embeds: Optional[torch.Tensor] = None,
1625
+ labels: Optional[torch.Tensor] = None,
1626
+ output_attentions: Optional[bool] = None,
1627
+ output_hidden_states: Optional[bool] = None,
1628
+ return_dict: Optional[bool] = None,
1629
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
1630
+ r"""
1631
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1632
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1633
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1634
+ `input_ids` above)
1635
+ """
1636
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1637
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1638
+
1639
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1640
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1641
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1642
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1643
+ inputs_embeds = (
1644
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1645
+ if inputs_embeds is not None
1646
+ else None
1647
+ )
1648
+
1649
+ outputs = self.bert(
1650
+ input_ids,
1651
+ attention_mask=attention_mask,
1652
+ token_type_ids=token_type_ids,
1653
+ position_ids=position_ids,
1654
+ head_mask=head_mask,
1655
+ inputs_embeds=inputs_embeds,
1656
+ output_attentions=output_attentions,
1657
+ output_hidden_states=output_hidden_states,
1658
+ return_dict=return_dict,
1659
+ )
1660
+
1661
+ pooled_output = outputs[1]
1662
+
1663
+ pooled_output = self.dropout(pooled_output)
1664
+ logits = self.classifier(pooled_output)
1665
+ reshaped_logits = logits.view(-1, num_choices)
1666
+
1667
+ loss = None
1668
+ if labels is not None:
1669
+ loss_fct = CrossEntropyLoss()
1670
+ loss = loss_fct(reshaped_logits, labels)
1671
+
1672
+ if not return_dict:
1673
+ output = (reshaped_logits,) + outputs[2:]
1674
+ return ((loss,) + output) if loss is not None else output
1675
+
1676
+ return MultipleChoiceModelOutput(
1677
+ loss=loss,
1678
+ logits=reshaped_logits,
1679
+ hidden_states=outputs.hidden_states,
1680
+ attentions=outputs.attentions,
1681
+ )
1682
+
1683
+
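`BertForMultipleChoice` expects every input tensor to carry an extra `num_choices` dimension; the forward pass above flattens `(batch_size, num_choices, seq_len)` to `(batch_size * num_choices, seq_len)`, scores each choice with a single-output linear head, and reshapes back to `(batch_size, num_choices)` before the cross-entropy over choices. A sketch with a tiny randomly initialized config:

```python
# Sketch of the (batch, num_choices, seq_len) input layout for BertForMultipleChoice.
import torch
from transformers import BertConfig, BertForMultipleChoice

config = BertConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128)
model = BertForMultipleChoice(config)

batch_size, num_choices, seq_len = 2, 4, 8
input_ids = torch.randint(0, config.vocab_size, (batch_size, num_choices, seq_len))
labels = torch.tensor([1, 3])  # index of the correct choice for each example

out = model(input_ids=input_ids, labels=labels)
print(out.logits.shape)  # torch.Size([2, 4]) -- one score per choice
print(out.loss.item())
```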
1684
+ @add_start_docstrings(
1685
+ """
1686
+ Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1687
+ Named-Entity-Recognition (NER) tasks.
1688
+ """,
1689
+ BERT_START_DOCSTRING,
1690
+ )
1691
+ class BertForTokenClassification(BertPreTrainedModel):
1692
+ def __init__(self, config):
1693
+ super().__init__(config)
1694
+ self.num_labels = config.num_labels
1695
+
1696
+ self.bert = BertModel(config, add_pooling_layer=False)
1697
+ classifier_dropout = (
1698
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1699
+ )
1700
+ self.dropout = nn.Dropout(classifier_dropout)
1701
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1702
+
1703
+ # Initialize weights and apply final processing
1704
+ self.post_init()
1705
+
1706
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1707
+ @add_code_sample_docstrings(
1708
+ checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION,
1709
+ output_type=TokenClassifierOutput,
1710
+ config_class=_CONFIG_FOR_DOC,
1711
+ expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT,
1712
+ expected_loss=_TOKEN_CLASS_EXPECTED_LOSS,
1713
+ )
1714
+ def forward(
1715
+ self,
1716
+ input_ids: Optional[torch.Tensor] = None,
1717
+ attention_mask: Optional[torch.Tensor] = None,
1718
+ token_type_ids: Optional[torch.Tensor] = None,
1719
+ position_ids: Optional[torch.Tensor] = None,
1720
+ head_mask: Optional[torch.Tensor] = None,
1721
+ inputs_embeds: Optional[torch.Tensor] = None,
1722
+ labels: Optional[torch.Tensor] = None,
1723
+ output_attentions: Optional[bool] = None,
1724
+ output_hidden_states: Optional[bool] = None,
1725
+ return_dict: Optional[bool] = None,
1726
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1727
+ r"""
1728
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1729
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1730
+ """
1731
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1732
+
1733
+ outputs = self.bert(
1734
+ input_ids,
1735
+ attention_mask=attention_mask,
1736
+ token_type_ids=token_type_ids,
1737
+ position_ids=position_ids,
1738
+ head_mask=head_mask,
1739
+ inputs_embeds=inputs_embeds,
1740
+ output_attentions=output_attentions,
1741
+ output_hidden_states=output_hidden_states,
1742
+ return_dict=return_dict,
1743
+ )
1744
+
1745
+ sequence_output = outputs[0]
1746
+
1747
+ sequence_output = self.dropout(sequence_output)
1748
+ logits = self.classifier(sequence_output)
1749
+
1750
+ loss = None
1751
+ if labels is not None:
1752
+ loss_fct = CrossEntropyLoss()
1753
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1754
+
1755
+ if not return_dict:
1756
+ output = (logits,) + outputs[2:]
1757
+ return ((loss,) + output) if loss is not None else output
1758
+
1759
+ return TokenClassifierOutput(
1760
+ loss=loss,
1761
+ logits=logits,
1762
+ hidden_states=outputs.hidden_states,
1763
+ attentions=outputs.attentions,
1764
+ )
1765
+
1766
+
1767
+ @add_start_docstrings(
1768
+ """
1769
+ Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1770
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1771
+ """,
1772
+ BERT_START_DOCSTRING,
1773
+ )
1774
+ class BertForQuestionAnswering(BertPreTrainedModel):
1775
+ def __init__(self, config):
1776
+ super().__init__(config)
1777
+ self.num_labels = config.num_labels
1778
+
1779
+ self.bert = BertModel(config, add_pooling_layer=False)
1780
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1781
+
1782
+ # Initialize weights and apply final processing
1783
+ self.post_init()
1784
+
1785
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1786
+ @add_code_sample_docstrings(
1787
+ checkpoint=_CHECKPOINT_FOR_QA,
1788
+ output_type=QuestionAnsweringModelOutput,
1789
+ config_class=_CONFIG_FOR_DOC,
1790
+ qa_target_start_index=_QA_TARGET_START_INDEX,
1791
+ qa_target_end_index=_QA_TARGET_END_INDEX,
1792
+ expected_output=_QA_EXPECTED_OUTPUT,
1793
+ expected_loss=_QA_EXPECTED_LOSS,
1794
+ )
1795
+ def forward(
1796
+ self,
1797
+ input_ids: Optional[torch.Tensor] = None,
1798
+ attention_mask: Optional[torch.Tensor] = None,
1799
+ token_type_ids: Optional[torch.Tensor] = None,
1800
+ position_ids: Optional[torch.Tensor] = None,
1801
+ head_mask: Optional[torch.Tensor] = None,
1802
+ inputs_embeds: Optional[torch.Tensor] = None,
1803
+ start_positions: Optional[torch.Tensor] = None,
1804
+ end_positions: Optional[torch.Tensor] = None,
1805
+ output_attentions: Optional[bool] = None,
1806
+ output_hidden_states: Optional[bool] = None,
1807
+ return_dict: Optional[bool] = None,
1808
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
1809
+ r"""
1810
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1811
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1812
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1813
+ are not taken into account for computing the loss.
1814
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1815
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1816
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1817
+ are not taken into account for computing the loss.
1818
+ """
1819
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1820
+
1821
+ outputs = self.bert(
1822
+ input_ids,
1823
+ attention_mask=attention_mask,
1824
+ token_type_ids=token_type_ids,
1825
+ position_ids=position_ids,
1826
+ head_mask=head_mask,
1827
+ inputs_embeds=inputs_embeds,
1828
+ output_attentions=output_attentions,
1829
+ output_hidden_states=output_hidden_states,
1830
+ return_dict=return_dict,
1831
+ )
1832
+
1833
+ sequence_output = outputs[0]
1834
+
1835
+ logits = self.qa_outputs(sequence_output)
1836
+ start_logits, end_logits = logits.split(1, dim=-1)
1837
+ start_logits = start_logits.squeeze(-1).contiguous()
1838
+ end_logits = end_logits.squeeze(-1).contiguous()
1839
+
1840
+ total_loss = None
1841
+ if start_positions is not None and end_positions is not None:
1842
+ # If we are on multi-GPU, splitting adds an extra dimension; squeeze it
1843
+ if len(start_positions.size()) > 1:
1844
+ start_positions = start_positions.squeeze(-1)
1845
+ if len(end_positions.size()) > 1:
1846
+ end_positions = end_positions.squeeze(-1)
1847
+ # sometimes the start/end positions are outside our model inputs; we ignore these terms
1848
+ ignored_index = start_logits.size(1)
1849
+ start_positions = start_positions.clamp(0, ignored_index)
1850
+ end_positions = end_positions.clamp(0, ignored_index)
1851
+
1852
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1853
+ start_loss = loss_fct(start_logits, start_positions)
1854
+ end_loss = loss_fct(end_logits, end_positions)
1855
+ total_loss = (start_loss + end_loss) / 2
1856
+
1857
+ if not return_dict:
1858
+ output = (start_logits, end_logits) + outputs[2:]
1859
+ return ((total_loss,) + output) if total_loss is not None else output
1860
+
1861
+ return QuestionAnsweringModelOutput(
1862
+ loss=total_loss,
1863
+ start_logits=start_logits,
1864
+ end_logits=end_logits,
1865
+ hidden_states=outputs.hidden_states,
1866
+ attentions=outputs.attentions,
1867
+ )
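A minimal usage sketch for `BertForQuestionAnswering`, turning the start/end logits above into an answer span; the SQuAD-fine-tuned checkpoint name is an assumption:

```python
import torch
from transformers import AutoTokenizer, BertForQuestionAnswering

# checkpoint name assumed; any BERT checkpoint fine-tuned on SQuAD would work
name = "deepset/bert-base-cased-squad2"
tokenizer = AutoTokenizer.from_pretrained(name)
model = BertForQuestionAnswering.from_pretrained(name)

question = "Where is HuggingFace based?"
context = "HuggingFace Inc. is a company based in New York City."
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

start = int(outputs.start_logits.argmax())   # most likely start token index
end = int(outputs.end_logits.argmax())       # most likely end token index
answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])
```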
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/modeling_flax_bert.py ADDED
@@ -0,0 +1,1713 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Google Flax Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Callable, Optional, Tuple
17
+
18
+ import flax
19
+ import flax.linen as nn
20
+ import jax
21
+ import jax.numpy as jnp
22
+ import numpy as np
23
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
24
+ from flax.linen import combine_masks, make_causal_mask
25
+ from flax.linen import partitioning as nn_partitioning
26
+ from flax.linen.attention import dot_product_attention_weights
27
+ from flax.traverse_util import flatten_dict, unflatten_dict
28
+ from jax import lax
29
+
30
+ from ...modeling_flax_outputs import (
31
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
32
+ FlaxBaseModelOutputWithPooling,
33
+ FlaxBaseModelOutputWithPoolingAndCrossAttentions,
34
+ FlaxCausalLMOutputWithCrossAttentions,
35
+ FlaxMaskedLMOutput,
36
+ FlaxMultipleChoiceModelOutput,
37
+ FlaxNextSentencePredictorOutput,
38
+ FlaxQuestionAnsweringModelOutput,
39
+ FlaxSequenceClassifierOutput,
40
+ FlaxTokenClassifierOutput,
41
+ )
42
+ from ...modeling_flax_utils import (
43
+ ACT2FN,
44
+ FlaxPreTrainedModel,
45
+ append_call_sample_docstring,
46
+ append_replace_return_docstrings,
47
+ overwrite_call_docstring,
48
+ )
49
+ from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging
50
+ from .configuration_bert import BertConfig
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+ _CHECKPOINT_FOR_DOC = "google-bert/bert-base-uncased"
56
+ _CONFIG_FOR_DOC = "BertConfig"
57
+
58
+ remat = nn_partitioning.remat
59
+
60
+
61
+ @flax.struct.dataclass
62
+ class FlaxBertForPreTrainingOutput(ModelOutput):
63
+ """
64
+ Output type of [`BertForPreTraining`].
65
+
66
+ Args:
67
+ prediction_logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
68
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
69
+ seq_relationship_logits (`jnp.ndarray` of shape `(batch_size, 2)`):
70
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
71
+ before SoftMax).
72
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
73
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
74
+ `(batch_size, sequence_length, hidden_size)`.
75
+
76
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
77
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
78
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
79
+ sequence_length)`.
80
+
81
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
82
+ heads.
83
+ """
84
+
85
+ prediction_logits: jnp.ndarray = None
86
+ seq_relationship_logits: jnp.ndarray = None
87
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
88
+ attentions: Optional[Tuple[jnp.ndarray]] = None
89
+
90
+
91
+ BERT_START_DOCSTRING = r"""
92
+
93
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
94
+ library implements for all its models (such as downloading, saving and converting weights from PyTorch models).
95
+
96
+ This model is also a
97
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
98
+ a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and
99
+ behavior.
100
+
101
+ Finally, this model supports inherent JAX features such as:
102
+
103
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
104
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
105
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
106
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
107
+
108
+ Parameters:
109
+ config ([`BertConfig`]): Model configuration class with all the parameters of the model.
110
+ Initializing with a config file does not load the weights associated with the model, only the
111
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
112
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
113
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
114
+ `jax.numpy.bfloat16` (on TPUs).
115
+
116
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
117
+ specified all the computation will be performed with the given `dtype`.
118
+
119
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
120
+ parameters.**
121
+
122
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
123
+ [`~FlaxPreTrainedModel.to_bf16`].
136
+
137
+ """
138
+
139
+ BERT_INPUTS_DOCSTRING = r"""
140
+ Args:
141
+ input_ids (`numpy.ndarray` of shape `({0})`):
142
+ Indices of input sequence tokens in the vocabulary.
143
+
144
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
145
+ [`PreTrainedTokenizer.__call__`] for details.
146
+
147
+ [What are input IDs?](../glossary#input-ids)
148
+ attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
149
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
150
+
151
+ - 1 for tokens that are **not masked**,
152
+ - 0 for tokens that are **masked**.
153
+
154
+ [What are attention masks?](../glossary#attention-mask)
155
+ token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):
156
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
157
+ 1]`:
158
+
159
+ - 0 corresponds to a *sentence A* token,
160
+ - 1 corresponds to a *sentence B* token.
161
+
162
+ [What are token type IDs?](../glossary#token-type-ids)
163
+ position_ids (`numpy.ndarray` of shape `({0})`, *optional*):
164
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
165
+ config.max_position_embeddings - 1]`.
166
+ head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
167
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
168
+
169
+ - 1 indicates the head is **not masked**,
170
+ - 0 indicates the head is **masked**.
171
+
172
+ return_dict (`bool`, *optional*):
173
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
174
+
175
+ """
176
+
177
+
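A small illustration of the inputs documented above, produced with the reference uncased tokenizer:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
enc = tokenizer("sentence A", "sentence B", return_tensors="np")

print(enc["input_ids"])       # vocabulary indices for [CLS] A [SEP] B [SEP]
print(enc["attention_mask"])  # 1 for real tokens, 0 for padding
print(enc["token_type_ids"])  # 0 for sentence-A tokens, 1 for sentence-B tokens
```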
178
+ class FlaxBertEmbeddings(nn.Module):
179
+ """Construct the embeddings from word, position and token_type embeddings."""
180
+
181
+ config: BertConfig
182
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
183
+
184
+ def setup(self):
185
+ self.word_embeddings = nn.Embed(
186
+ self.config.vocab_size,
187
+ self.config.hidden_size,
188
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
189
+ dtype=self.dtype,
190
+ )
191
+ self.position_embeddings = nn.Embed(
192
+ self.config.max_position_embeddings,
193
+ self.config.hidden_size,
194
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
195
+ dtype=self.dtype,
196
+ )
197
+ self.token_type_embeddings = nn.Embed(
198
+ self.config.type_vocab_size,
199
+ self.config.hidden_size,
200
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
201
+ dtype=self.dtype,
202
+ )
203
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
204
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
205
+
206
+ def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True):
207
+ # Embed
208
+ inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
209
+ position_embeds = self.position_embeddings(position_ids.astype("i4"))
210
+ token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4"))
211
+
212
+ # Sum all embeddings
213
+ hidden_states = inputs_embeds + token_type_embeddings + position_embeds
214
+
215
+ # Layer Norm
216
+ hidden_states = self.LayerNorm(hidden_states)
217
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
218
+ return hidden_states
219
+
220
+
221
+ class FlaxBertSelfAttention(nn.Module):
222
+ config: BertConfig
223
+ causal: bool = False
224
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
225
+
226
+ def setup(self):
227
+ self.head_dim = self.config.hidden_size // self.config.num_attention_heads
228
+ if self.config.hidden_size % self.config.num_attention_heads != 0:
229
+ raise ValueError(
230
+ f"`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` "
231
+ f": {self.config.num_attention_heads}"
232
+ )
233
+
234
+ self.query = nn.Dense(
235
+ self.config.hidden_size,
236
+ dtype=self.dtype,
237
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
238
+ )
239
+ self.key = nn.Dense(
240
+ self.config.hidden_size,
241
+ dtype=self.dtype,
242
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
243
+ )
244
+ self.value = nn.Dense(
245
+ self.config.hidden_size,
246
+ dtype=self.dtype,
247
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
248
+ )
249
+
250
+ if self.causal:
251
+ self.causal_mask = make_causal_mask(
252
+ jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
253
+ )
254
+
255
+ def _split_heads(self, hidden_states):
256
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim))
257
+
258
+ def _merge_heads(self, hidden_states):
259
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,))
260
+
261
+ @nn.compact
262
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache
263
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
264
+ """
265
+ This function takes projected key, value states from a single input token and concatenates the states to cached
266
+ states from previous steps. This function is slightly adapted from the official Flax repository:
267
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
268
+ """
269
+ # detect if we're initializing by absence of existing cache data.
270
+ is_initialized = self.has_variable("cache", "cached_key")
271
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
272
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
273
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
274
+
275
+ if is_initialized:
276
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
277
+ # update key, value caches with our new 1d spatial slices
278
+ cur_index = cache_index.value
279
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
280
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
281
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
282
+ cached_key.value = key
283
+ cached_value.value = value
284
+ num_updated_cache_vectors = query.shape[1]
285
+ cache_index.value = cache_index.value + num_updated_cache_vectors
286
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
287
+ pad_mask = jnp.broadcast_to(
288
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
289
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
290
+ )
291
+ attention_mask = combine_masks(pad_mask, attention_mask)
292
+ return key, value, attention_mask
293
+
294
+ def __call__(
295
+ self,
296
+ hidden_states,
297
+ attention_mask,
298
+ layer_head_mask,
299
+ key_value_states: Optional[jnp.ndarray] = None,
300
+ init_cache: bool = False,
301
+ deterministic=True,
302
+ output_attentions: bool = False,
303
+ ):
304
+ # if key_value_states are provided this layer is used as a cross-attention layer
305
+ # for the decoder
306
+ is_cross_attention = key_value_states is not None
307
+ batch_size = hidden_states.shape[0]
308
+
309
+ # get query proj
310
+ query_states = self.query(hidden_states)
311
+ # get key, value proj
312
+ if is_cross_attention:
313
+ # cross_attentions
314
+ key_states = self.key(key_value_states)
315
+ value_states = self.value(key_value_states)
316
+ else:
317
+ # self_attention
318
+ key_states = self.key(hidden_states)
319
+ value_states = self.value(hidden_states)
320
+
321
+ query_states = self._split_heads(query_states)
322
+ key_states = self._split_heads(key_states)
323
+ value_states = self._split_heads(value_states)
324
+
325
+ # handle cache: prepare the causal attention mask
326
+ if self.causal:
327
+ query_length, key_length = query_states.shape[1], key_states.shape[1]
328
+ if self.has_variable("cache", "cached_key"):
329
+ mask_shift = self.variables["cache"]["cache_index"]
330
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
331
+ causal_mask = lax.dynamic_slice(
332
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
333
+ )
334
+ else:
335
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
336
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
337
+
338
+ # combine masks if needed
339
+ if attention_mask is not None and self.causal:
340
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
341
+ attention_mask = combine_masks(attention_mask, causal_mask)
342
+ elif self.causal:
343
+ attention_mask = causal_mask
344
+ elif attention_mask is not None:
345
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
346
+
347
+ # During fast autoregressive decoding, we feed one position at a time,
348
+ # and cache the keys and values step by step.
349
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
350
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
351
+ key_states, value_states, query_states, attention_mask
352
+ )
353
+
354
+ # Convert the boolean attention mask to an attention bias.
355
+ if attention_mask is not None:
356
+ # attention mask in the form of attention bias
357
+ attention_bias = lax.select(
358
+ attention_mask > 0,
359
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
360
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
361
+ )
362
+ else:
363
+ attention_bias = None
364
+
365
+ dropout_rng = None
366
+ if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
367
+ dropout_rng = self.make_rng("dropout")
368
+
369
+ attn_weights = dot_product_attention_weights(
370
+ query_states,
371
+ key_states,
372
+ bias=attention_bias,
373
+ dropout_rng=dropout_rng,
374
+ dropout_rate=self.config.attention_probs_dropout_prob,
375
+ broadcast_dropout=True,
376
+ deterministic=deterministic,
377
+ dtype=self.dtype,
378
+ precision=None,
379
+ )
380
+
381
+ # Mask heads if we want to
382
+ if layer_head_mask is not None:
383
+ attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask)
384
+
385
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
386
+ attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
387
+
388
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
389
+ return outputs
390
+
391
+
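A minimal sketch of the boolean-mask-to-bias conversion used in `FlaxBertSelfAttention` above: positions with mask 0 receive a large negative bias so they vanish after the softmax (the mask values below are illustrative).

```python
import jax.numpy as jnp
from jax import lax

attention_mask = jnp.array([[1, 1, 0]])  # 1 = attend, 0 = padding
dtype = jnp.float32
attention_bias = lax.select(
    attention_mask > 0,
    jnp.full(attention_mask.shape, 0.0).astype(dtype),
    jnp.full(attention_mask.shape, jnp.finfo(dtype).min).astype(dtype),
)
# attention_bias == [[0., 0., -3.4028235e+38]]; added to the raw attention scores,
# the padded position is effectively ignored by the softmax.
```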
392
+ class FlaxBertSelfOutput(nn.Module):
393
+ config: BertConfig
394
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
395
+
396
+ def setup(self):
397
+ self.dense = nn.Dense(
398
+ self.config.hidden_size,
399
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
400
+ dtype=self.dtype,
401
+ )
402
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
403
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
404
+
405
+ def __call__(self, hidden_states, input_tensor, deterministic: bool = True):
406
+ hidden_states = self.dense(hidden_states)
407
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
408
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
409
+ return hidden_states
410
+
411
+
412
+ class FlaxBertAttention(nn.Module):
413
+ config: BertConfig
414
+ causal: bool = False
415
+ dtype: jnp.dtype = jnp.float32
416
+
417
+ def setup(self):
418
+ self.self = FlaxBertSelfAttention(self.config, causal=self.causal, dtype=self.dtype)
419
+ self.output = FlaxBertSelfOutput(self.config, dtype=self.dtype)
420
+
421
+ def __call__(
422
+ self,
423
+ hidden_states,
424
+ attention_mask,
425
+ layer_head_mask,
426
+ key_value_states=None,
427
+ init_cache=False,
428
+ deterministic=True,
429
+ output_attentions: bool = False,
430
+ ):
431
+ # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length)
432
+ # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable
433
+ # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length)
434
+ attn_outputs = self.self(
435
+ hidden_states,
436
+ attention_mask,
437
+ layer_head_mask=layer_head_mask,
438
+ key_value_states=key_value_states,
439
+ init_cache=init_cache,
440
+ deterministic=deterministic,
441
+ output_attentions=output_attentions,
442
+ )
443
+ attn_output = attn_outputs[0]
444
+ hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic)
445
+
446
+ outputs = (hidden_states,)
447
+
448
+ if output_attentions:
449
+ outputs += (attn_outputs[1],)
450
+
451
+ return outputs
452
+
453
+
454
+ class FlaxBertIntermediate(nn.Module):
455
+ config: BertConfig
456
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
457
+
458
+ def setup(self):
459
+ self.dense = nn.Dense(
460
+ self.config.intermediate_size,
461
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
462
+ dtype=self.dtype,
463
+ )
464
+ self.activation = ACT2FN[self.config.hidden_act]
465
+
466
+ def __call__(self, hidden_states):
467
+ hidden_states = self.dense(hidden_states)
468
+ hidden_states = self.activation(hidden_states)
469
+ return hidden_states
470
+
471
+
472
+ class FlaxBertOutput(nn.Module):
473
+ config: BertConfig
474
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
475
+
476
+ def setup(self):
477
+ self.dense = nn.Dense(
478
+ self.config.hidden_size,
479
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
480
+ dtype=self.dtype,
481
+ )
482
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
483
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
484
+
485
+ def __call__(self, hidden_states, attention_output, deterministic: bool = True):
486
+ hidden_states = self.dense(hidden_states)
487
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
488
+ hidden_states = self.LayerNorm(hidden_states + attention_output)
489
+ return hidden_states
490
+
491
+
492
+ class FlaxBertLayer(nn.Module):
493
+ config: BertConfig
494
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
495
+
496
+ def setup(self):
497
+ self.attention = FlaxBertAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype)
498
+ self.intermediate = FlaxBertIntermediate(self.config, dtype=self.dtype)
499
+ self.output = FlaxBertOutput(self.config, dtype=self.dtype)
500
+ if self.config.add_cross_attention:
501
+ self.crossattention = FlaxBertAttention(self.config, causal=False, dtype=self.dtype)
502
+
503
+ def __call__(
504
+ self,
505
+ hidden_states,
506
+ attention_mask,
507
+ layer_head_mask,
508
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
509
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
510
+ init_cache: bool = False,
511
+ deterministic: bool = True,
512
+ output_attentions: bool = False,
513
+ ):
514
+ # Self Attention
515
+ attention_outputs = self.attention(
516
+ hidden_states,
517
+ attention_mask,
518
+ layer_head_mask=layer_head_mask,
519
+ init_cache=init_cache,
520
+ deterministic=deterministic,
521
+ output_attentions=output_attentions,
522
+ )
523
+ attention_output = attention_outputs[0]
524
+
525
+ # Cross-Attention Block
526
+ if encoder_hidden_states is not None:
527
+ cross_attention_outputs = self.crossattention(
528
+ attention_output,
529
+ attention_mask=encoder_attention_mask,
530
+ layer_head_mask=layer_head_mask,
531
+ key_value_states=encoder_hidden_states,
532
+ deterministic=deterministic,
533
+ output_attentions=output_attentions,
534
+ )
535
+ attention_output = cross_attention_outputs[0]
536
+
537
+ hidden_states = self.intermediate(attention_output)
538
+ hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic)
539
+
540
+ outputs = (hidden_states,)
541
+
542
+ if output_attentions:
543
+ outputs += (attention_outputs[1],)
544
+ if encoder_hidden_states is not None:
545
+ outputs += (cross_attention_outputs[1],)
546
+ return outputs
547
+
548
+
549
+ class FlaxBertLayerCollection(nn.Module):
550
+ config: BertConfig
551
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
552
+ gradient_checkpointing: bool = False
553
+
554
+ def setup(self):
555
+ if self.gradient_checkpointing:
556
+ FlaxBertCheckpointLayer = remat(FlaxBertLayer, static_argnums=(5, 6, 7))
557
+ self.layers = [
558
+ FlaxBertCheckpointLayer(self.config, name=str(i), dtype=self.dtype)
559
+ for i in range(self.config.num_hidden_layers)
560
+ ]
561
+ else:
562
+ self.layers = [
563
+ FlaxBertLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)
564
+ ]
565
+
566
+ def __call__(
567
+ self,
568
+ hidden_states,
569
+ attention_mask,
570
+ head_mask,
571
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
572
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
573
+ init_cache: bool = False,
574
+ deterministic: bool = True,
575
+ output_attentions: bool = False,
576
+ output_hidden_states: bool = False,
577
+ return_dict: bool = True,
578
+ ):
579
+ all_attentions = () if output_attentions else None
580
+ all_hidden_states = () if output_hidden_states else None
581
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
582
+
583
+ # Check if head_mask has a correct number of layers specified if desired
584
+ if head_mask is not None:
585
+ if head_mask.shape[0] != len(self.layers):
586
+ raise ValueError(
587
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for "
588
+ f"{head_mask.shape[0]}."
589
+ )
590
+
591
+ for i, layer in enumerate(self.layers):
592
+ if output_hidden_states:
593
+ all_hidden_states += (hidden_states,)
594
+
595
+ layer_outputs = layer(
596
+ hidden_states,
597
+ attention_mask,
598
+ head_mask[i] if head_mask is not None else None,
599
+ encoder_hidden_states,
600
+ encoder_attention_mask,
601
+ init_cache,
602
+ deterministic,
603
+ output_attentions,
604
+ )
605
+
606
+ hidden_states = layer_outputs[0]
607
+
608
+ if output_attentions:
609
+ all_attentions += (layer_outputs[1],)
610
+
611
+ if encoder_hidden_states is not None:
612
+ all_cross_attentions += (layer_outputs[2],)
613
+
614
+ if output_hidden_states:
615
+ all_hidden_states += (hidden_states,)
616
+
617
+ outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions)
618
+
619
+ if not return_dict:
620
+ return tuple(v for v in outputs if v is not None)
621
+
622
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
623
+ last_hidden_state=hidden_states,
624
+ hidden_states=all_hidden_states,
625
+ attentions=all_attentions,
626
+ cross_attentions=all_cross_attentions,
627
+ )
628
+
629
+
630
+ class FlaxBertEncoder(nn.Module):
631
+ config: BertConfig
632
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
633
+ gradient_checkpointing: bool = False
634
+
635
+ def setup(self):
636
+ self.layer = FlaxBertLayerCollection(
637
+ self.config,
638
+ dtype=self.dtype,
639
+ gradient_checkpointing=self.gradient_checkpointing,
640
+ )
641
+
642
+ def __call__(
643
+ self,
644
+ hidden_states,
645
+ attention_mask,
646
+ head_mask,
647
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
648
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
649
+ init_cache: bool = False,
650
+ deterministic: bool = True,
651
+ output_attentions: bool = False,
652
+ output_hidden_states: bool = False,
653
+ return_dict: bool = True,
654
+ ):
655
+ return self.layer(
656
+ hidden_states,
657
+ attention_mask,
658
+ head_mask=head_mask,
659
+ encoder_hidden_states=encoder_hidden_states,
660
+ encoder_attention_mask=encoder_attention_mask,
661
+ init_cache=init_cache,
662
+ deterministic=deterministic,
663
+ output_attentions=output_attentions,
664
+ output_hidden_states=output_hidden_states,
665
+ return_dict=return_dict,
666
+ )
667
+
668
+
669
+ class FlaxBertPooler(nn.Module):
670
+ config: BertConfig
671
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
672
+
673
+ def setup(self):
674
+ self.dense = nn.Dense(
675
+ self.config.hidden_size,
676
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
677
+ dtype=self.dtype,
678
+ )
679
+
680
+ def __call__(self, hidden_states):
681
+ cls_hidden_state = hidden_states[:, 0]
682
+ cls_hidden_state = self.dense(cls_hidden_state)
683
+ return nn.tanh(cls_hidden_state)
684
+
685
+
686
+ class FlaxBertPredictionHeadTransform(nn.Module):
687
+ config: BertConfig
688
+ dtype: jnp.dtype = jnp.float32
689
+
690
+ def setup(self):
691
+ self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype)
692
+ self.activation = ACT2FN[self.config.hidden_act]
693
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
694
+
695
+ def __call__(self, hidden_states):
696
+ hidden_states = self.dense(hidden_states)
697
+ hidden_states = self.activation(hidden_states)
698
+ return self.LayerNorm(hidden_states)
699
+
700
+
701
+ class FlaxBertLMPredictionHead(nn.Module):
702
+ config: BertConfig
703
+ dtype: jnp.dtype = jnp.float32
704
+ bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
705
+
706
+ def setup(self):
707
+ self.transform = FlaxBertPredictionHeadTransform(self.config, dtype=self.dtype)
708
+ self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype, use_bias=False)
709
+ self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))
710
+
711
+ def __call__(self, hidden_states, shared_embedding=None):
712
+ hidden_states = self.transform(hidden_states)
713
+
714
+ if shared_embedding is not None:
715
+ hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
716
+ else:
717
+ hidden_states = self.decoder(hidden_states)
718
+
719
+ bias = jnp.asarray(self.bias, self.dtype)
720
+ hidden_states += bias
721
+ return hidden_states
722
+
723
+
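A small sketch of the weight-tying pattern used by `FlaxBertLMPredictionHead` above when `shared_embedding` is passed: the decoder `nn.Dense` is applied with the transposed embedding table as its kernel, so the MLM head reuses the input embeddings instead of learning a separate projection (shapes below are illustrative).

```python
import jax
import jax.numpy as jnp
import flax.linen as nn

hidden_size, vocab_size = 8, 32
# stand-in for the nn.Embed word-embedding table, shape (vocab_size, hidden_size)
embedding = jax.random.normal(jax.random.PRNGKey(0), (vocab_size, hidden_size))

decoder = nn.Dense(vocab_size, use_bias=False)
hidden_states = jnp.ones((1, 4, hidden_size))
# apply the Dense layer with the tied kernel instead of its own parameters
logits = decoder.apply({"params": {"kernel": embedding.T}}, hidden_states)  # (1, 4, vocab_size)
```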
724
+ class FlaxBertOnlyMLMHead(nn.Module):
725
+ config: BertConfig
726
+ dtype: jnp.dtype = jnp.float32
727
+
728
+ def setup(self):
729
+ self.predictions = FlaxBertLMPredictionHead(self.config, dtype=self.dtype)
730
+
731
+ def __call__(self, hidden_states, shared_embedding=None):
732
+ hidden_states = self.predictions(hidden_states, shared_embedding=shared_embedding)
733
+ return hidden_states
734
+
735
+
736
+ class FlaxBertOnlyNSPHead(nn.Module):
737
+ dtype: jnp.dtype = jnp.float32
738
+
739
+ def setup(self):
740
+ self.seq_relationship = nn.Dense(2, dtype=self.dtype)
741
+
742
+ def __call__(self, pooled_output):
743
+ return self.seq_relationship(pooled_output)
744
+
745
+
746
+ class FlaxBertPreTrainingHeads(nn.Module):
747
+ config: BertConfig
748
+ dtype: jnp.dtype = jnp.float32
749
+
750
+ def setup(self):
751
+ self.predictions = FlaxBertLMPredictionHead(self.config, dtype=self.dtype)
752
+ self.seq_relationship = nn.Dense(2, dtype=self.dtype)
753
+
754
+ def __call__(self, hidden_states, pooled_output, shared_embedding=None):
755
+ prediction_scores = self.predictions(hidden_states, shared_embedding=shared_embedding)
756
+ seq_relationship_score = self.seq_relationship(pooled_output)
757
+ return prediction_scores, seq_relationship_score
758
+
759
+
760
+ class FlaxBertPreTrainedModel(FlaxPreTrainedModel):
761
+ """
762
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
763
+ models.
764
+ """
765
+
766
+ config_class = BertConfig
767
+ base_model_prefix = "bert"
768
+ module_class: nn.Module = None
769
+
770
+ def __init__(
771
+ self,
772
+ config: BertConfig,
773
+ input_shape: Tuple = (1, 1),
774
+ seed: int = 0,
775
+ dtype: jnp.dtype = jnp.float32,
776
+ _do_init: bool = True,
777
+ gradient_checkpointing: bool = False,
778
+ **kwargs,
779
+ ):
780
+ module = self.module_class(
781
+ config=config,
782
+ dtype=dtype,
783
+ gradient_checkpointing=gradient_checkpointing,
784
+ **kwargs,
785
+ )
786
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
787
+
788
+ def enable_gradient_checkpointing(self):
789
+ self._module = self.module_class(
790
+ config=self.config,
791
+ dtype=self.dtype,
792
+ gradient_checkpointing=True,
793
+ )
794
+
795
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
796
+ # init input tensors
797
+ input_ids = jnp.zeros(input_shape, dtype="i4")
798
+ token_type_ids = jnp.zeros_like(input_ids)
799
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
800
+ attention_mask = jnp.ones_like(input_ids)
801
+ head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
802
+
803
+ params_rng, dropout_rng = jax.random.split(rng)
804
+ rngs = {"params": params_rng, "dropout": dropout_rng}
805
+
806
+ if self.config.add_cross_attention:
807
+ encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,))
808
+ encoder_attention_mask = attention_mask
809
+ module_init_outputs = self.module.init(
810
+ rngs,
811
+ input_ids,
812
+ attention_mask,
813
+ token_type_ids,
814
+ position_ids,
815
+ head_mask,
816
+ encoder_hidden_states,
817
+ encoder_attention_mask,
818
+ return_dict=False,
819
+ )
820
+ else:
821
+ module_init_outputs = self.module.init(
822
+ rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False
823
+ )
824
+
825
+ random_params = module_init_outputs["params"]
826
+
827
+ if params is not None:
828
+ random_params = flatten_dict(unfreeze(random_params))
829
+ params = flatten_dict(unfreeze(params))
830
+ for missing_key in self._missing_keys:
831
+ params[missing_key] = random_params[missing_key]
832
+ self._missing_keys = set()
833
+ return freeze(unflatten_dict(params))
834
+ else:
835
+ return random_params
836
+
837
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderPreTrainedModel.init_cache
838
+ def init_cache(self, batch_size, max_length):
839
+ r"""
840
+ Args:
841
+ batch_size (`int`):
842
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
843
+ max_length (`int`):
844
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
845
+ cache.
846
+ """
847
+ # init input variables to retrieve cache
848
+ input_ids = jnp.ones((batch_size, max_length), dtype="i4")
849
+ attention_mask = jnp.ones_like(input_ids, dtype="i4")
850
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
851
+
852
+ init_variables = self.module.init(
853
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
854
+ )
855
+ return unfreeze(init_variables["cache"])
856
+
857
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
858
+ def __call__(
859
+ self,
860
+ input_ids,
861
+ attention_mask=None,
862
+ token_type_ids=None,
863
+ position_ids=None,
864
+ head_mask=None,
865
+ encoder_hidden_states=None,
866
+ encoder_attention_mask=None,
867
+ params: dict = None,
868
+ dropout_rng: jax.random.PRNGKey = None,
869
+ train: bool = False,
870
+ output_attentions: Optional[bool] = None,
871
+ output_hidden_states: Optional[bool] = None,
872
+ return_dict: Optional[bool] = None,
873
+ past_key_values: dict = None,
874
+ ):
875
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
876
+ output_hidden_states = (
877
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
878
+ )
879
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
880
+
881
+ # init input tensors if not passed
882
+ if token_type_ids is None:
883
+ token_type_ids = jnp.zeros_like(input_ids)
884
+
885
+ if position_ids is None:
886
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
887
+
888
+ if attention_mask is None:
889
+ attention_mask = jnp.ones_like(input_ids)
890
+
891
+ if head_mask is None:
892
+ head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
893
+
894
+ # Handle any PRNG if needed
895
+ rngs = {}
896
+ if dropout_rng is not None:
897
+ rngs["dropout"] = dropout_rng
898
+
899
+ inputs = {"params": params or self.params}
900
+
901
+ if self.config.add_cross_attention:
902
+ # if past_key_values are passed, the cache is already initialized; a private flag init_cache has to be passed
903
+ # down to ensure cache is used. It has to be made sure that cache is marked as mutable so that it can be
904
+ # changed by FlaxBertAttention module
905
+ if past_key_values:
906
+ inputs["cache"] = past_key_values
907
+ mutable = ["cache"]
908
+ else:
909
+ mutable = False
910
+
911
+ outputs = self.module.apply(
912
+ inputs,
913
+ jnp.array(input_ids, dtype="i4"),
914
+ jnp.array(attention_mask, dtype="i4"),
915
+ token_type_ids=jnp.array(token_type_ids, dtype="i4"),
916
+ position_ids=jnp.array(position_ids, dtype="i4"),
917
+ head_mask=jnp.array(head_mask, dtype="i4"),
918
+ encoder_hidden_states=encoder_hidden_states,
919
+ encoder_attention_mask=encoder_attention_mask,
920
+ deterministic=not train,
921
+ output_attentions=output_attentions,
922
+ output_hidden_states=output_hidden_states,
923
+ return_dict=return_dict,
924
+ rngs=rngs,
925
+ mutable=mutable,
926
+ )
927
+
928
+ # add updated cache to model output
929
+ if past_key_values is not None and return_dict:
930
+ outputs, past_key_values = outputs
931
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
932
+ return outputs
933
+ elif past_key_values is not None and not return_dict:
934
+ outputs, past_key_values = outputs
935
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
936
+
937
+ else:
938
+ outputs = self.module.apply(
939
+ inputs,
940
+ jnp.array(input_ids, dtype="i4"),
941
+ jnp.array(attention_mask, dtype="i4"),
942
+ token_type_ids=jnp.array(token_type_ids, dtype="i4"),
943
+ position_ids=jnp.array(position_ids, dtype="i4"),
944
+ head_mask=jnp.array(head_mask, dtype="i4"),
945
+ deterministic=not train,
946
+ output_attentions=output_attentions,
947
+ output_hidden_states=output_hidden_states,
948
+ return_dict=return_dict,
949
+ rngs=rngs,
950
+ )
951
+
952
+ return outputs
953
+
954
+
955
+ class FlaxBertModule(nn.Module):
956
+ config: BertConfig
957
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
958
+ add_pooling_layer: bool = True
959
+ gradient_checkpointing: bool = False
960
+
961
+ def setup(self):
962
+ self.embeddings = FlaxBertEmbeddings(self.config, dtype=self.dtype)
963
+ self.encoder = FlaxBertEncoder(
964
+ self.config,
965
+ dtype=self.dtype,
966
+ gradient_checkpointing=self.gradient_checkpointing,
967
+ )
968
+ self.pooler = FlaxBertPooler(self.config, dtype=self.dtype)
969
+
970
+ def __call__(
971
+ self,
972
+ input_ids,
973
+ attention_mask,
974
+ token_type_ids: Optional[jnp.ndarray] = None,
975
+ position_ids: Optional[jnp.ndarray] = None,
976
+ head_mask: Optional[jnp.ndarray] = None,
977
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
978
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
979
+ init_cache: bool = False,
980
+ deterministic: bool = True,
981
+ output_attentions: bool = False,
982
+ output_hidden_states: bool = False,
983
+ return_dict: bool = True,
984
+ ):
985
+ # make sure `token_type_ids` is correctly initialized when not passed
986
+ if token_type_ids is None:
987
+ token_type_ids = jnp.zeros_like(input_ids)
988
+
989
+ # make sure `position_ids` is correctly initialized when not passed
990
+ if position_ids is None:
991
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
992
+
993
+ hidden_states = self.embeddings(
994
+ input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic
995
+ )
996
+ outputs = self.encoder(
997
+ hidden_states,
998
+ attention_mask,
999
+ head_mask=head_mask,
1000
+ deterministic=deterministic,
1001
+ encoder_hidden_states=encoder_hidden_states,
1002
+ encoder_attention_mask=encoder_attention_mask,
1003
+ init_cache=init_cache,
1004
+ output_attentions=output_attentions,
1005
+ output_hidden_states=output_hidden_states,
1006
+ return_dict=return_dict,
1007
+ )
1008
+ hidden_states = outputs[0]
1009
+ pooled = self.pooler(hidden_states) if self.add_pooling_layer else None
1010
+
1011
+ if not return_dict:
1012
+ # if pooled is None, don't return it
1013
+ if pooled is None:
1014
+ return (hidden_states,) + outputs[1:]
1015
+ return (hidden_states, pooled) + outputs[1:]
1016
+
1017
+ return FlaxBaseModelOutputWithPoolingAndCrossAttentions(
1018
+ last_hidden_state=hidden_states,
1019
+ pooler_output=pooled,
1020
+ hidden_states=outputs.hidden_states,
1021
+ attentions=outputs.attentions,
1022
+ cross_attentions=outputs.cross_attentions,
1023
+ )
1024
+
1025
+
1026
+ @add_start_docstrings(
1027
+ "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
1028
+ BERT_START_DOCSTRING,
1029
+ )
1030
+ class FlaxBertModel(FlaxBertPreTrainedModel):
1031
+ module_class = FlaxBertModule
1032
+
1033
+
1034
+ append_call_sample_docstring(FlaxBertModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC)
1035
+
1036
+
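A minimal usage sketch for `FlaxBertModel`, assuming the reference uncased checkpoint is available:

```python
from transformers import AutoTokenizer, FlaxBertModel

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = FlaxBertModel.from_pretrained("google-bert/bert-base-uncased")

inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
outputs = model(**inputs)
last_hidden_state = outputs.last_hidden_state  # (batch_size, sequence_length, hidden_size)
pooled = outputs.pooler_output                 # (batch_size, hidden_size), tanh of the [CLS] state
```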
1037
+ class FlaxBertForPreTrainingModule(nn.Module):
1038
+ config: BertConfig
1039
+ dtype: jnp.dtype = jnp.float32
1040
+ gradient_checkpointing: bool = False
1041
+
1042
+ def setup(self):
1043
+ self.bert = FlaxBertModule(
1044
+ config=self.config,
1045
+ dtype=self.dtype,
1046
+ gradient_checkpointing=self.gradient_checkpointing,
1047
+ )
1048
+ self.cls = FlaxBertPreTrainingHeads(config=self.config, dtype=self.dtype)
1049
+
1050
+ def __call__(
1051
+ self,
1052
+ input_ids,
1053
+ attention_mask,
1054
+ token_type_ids,
1055
+ position_ids,
1056
+ head_mask,
1057
+ deterministic: bool = True,
1058
+ output_attentions: bool = False,
1059
+ output_hidden_states: bool = False,
1060
+ return_dict: bool = True,
1061
+ ):
1062
+ # Model
1063
+ outputs = self.bert(
1064
+ input_ids,
1065
+ attention_mask,
1066
+ token_type_ids,
1067
+ position_ids,
1068
+ head_mask,
1069
+ deterministic=deterministic,
1070
+ output_attentions=output_attentions,
1071
+ output_hidden_states=output_hidden_states,
1072
+ return_dict=return_dict,
1073
+ )
1074
+
1075
+ if self.config.tie_word_embeddings:
1076
+ shared_embedding = self.bert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
1077
+ else:
1078
+ shared_embedding = None
1079
+
1080
+ hidden_states = outputs[0]
1081
+ pooled_output = outputs[1]
1082
+
1083
+ prediction_scores, seq_relationship_score = self.cls(
1084
+ hidden_states, pooled_output, shared_embedding=shared_embedding
1085
+ )
1086
+
1087
+ if not return_dict:
1088
+ return (prediction_scores, seq_relationship_score) + outputs[2:]
1089
+
1090
+ return FlaxBertForPreTrainingOutput(
1091
+ prediction_logits=prediction_scores,
1092
+ seq_relationship_logits=seq_relationship_score,
1093
+ hidden_states=outputs.hidden_states,
1094
+ attentions=outputs.attentions,
1095
+ )
1096
+
1097
+
1098
+ @add_start_docstrings(
1099
+ """
1100
+ Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
1101
+ sentence prediction (classification)` head.
1102
+ """,
1103
+ BERT_START_DOCSTRING,
1104
+ )
1105
+ class FlaxBertForPreTraining(FlaxBertPreTrainedModel):
1106
+ module_class = FlaxBertForPreTrainingModule
1107
+
1108
+
1109
+ FLAX_BERT_FOR_PRETRAINING_DOCSTRING = """
1110
+ Returns:
1111
+
1112
+ Example:
1113
+
1114
+ ```python
1115
+ >>> from transformers import AutoTokenizer, FlaxBertForPreTraining
1116
+
1117
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
1118
+ >>> model = FlaxBertForPreTraining.from_pretrained("google-bert/bert-base-uncased")
1119
+
1120
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
1121
+ >>> outputs = model(**inputs)
1122
+
1123
+ >>> prediction_logits = outputs.prediction_logits
1124
+ >>> seq_relationship_logits = outputs.seq_relationship_logits
1125
+ ```
1126
+ """
1127
+
1128
+ overwrite_call_docstring(
1129
+ FlaxBertForPreTraining,
1130
+ BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + FLAX_BERT_FOR_PRETRAINING_DOCSTRING,
1131
+ )
1132
+ append_replace_return_docstrings(
1133
+ FlaxBertForPreTraining, output_type=FlaxBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC
1134
+ )
1135
+
1136
+
1137
+ class FlaxBertForMaskedLMModule(nn.Module):
1138
+ config: BertConfig
1139
+ dtype: jnp.dtype = jnp.float32
1140
+ gradient_checkpointing: bool = False
1141
+
1142
+ def setup(self):
1143
+ self.bert = FlaxBertModule(
1144
+ config=self.config,
1145
+ add_pooling_layer=False,
1146
+ dtype=self.dtype,
1147
+ gradient_checkpointing=self.gradient_checkpointing,
1148
+ )
1149
+ self.cls = FlaxBertOnlyMLMHead(config=self.config, dtype=self.dtype)
1150
+
1151
+ def __call__(
1152
+ self,
1153
+ input_ids,
1154
+ attention_mask,
1155
+ token_type_ids,
1156
+ position_ids,
1157
+ head_mask,
1158
+ deterministic: bool = True,
1159
+ output_attentions: bool = False,
1160
+ output_hidden_states: bool = False,
1161
+ return_dict: bool = True,
1162
+ ):
1163
+ # Model
1164
+ outputs = self.bert(
1165
+ input_ids,
1166
+ attention_mask,
1167
+ token_type_ids,
1168
+ position_ids,
1169
+ head_mask,
1170
+ deterministic=deterministic,
1171
+ output_attentions=output_attentions,
1172
+ output_hidden_states=output_hidden_states,
1173
+ return_dict=return_dict,
1174
+ )
1175
+
1176
+ hidden_states = outputs[0]
1177
+ if self.config.tie_word_embeddings:
1178
+ shared_embedding = self.bert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
1179
+ else:
1180
+ shared_embedding = None
1181
+
1182
+ # Compute the prediction scores
1183
+ logits = self.cls(hidden_states, shared_embedding=shared_embedding)
1184
+
1185
+ if not return_dict:
1186
+ return (logits,) + outputs[1:]
1187
+
1188
+ return FlaxMaskedLMOutput(
1189
+ logits=logits,
1190
+ hidden_states=outputs.hidden_states,
1191
+ attentions=outputs.attentions,
1192
+ )
1193
+
1194
+
1195
+ @add_start_docstrings("""Bert Model with a `language modeling` head on top.""", BERT_START_DOCSTRING)
1196
+ class FlaxBertForMaskedLM(FlaxBertPreTrainedModel):
1197
+ module_class = FlaxBertForMaskedLMModule
1198
+
1199
+
1200
+ append_call_sample_docstring(FlaxBertForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC)
1201
+
1202
+
1203
+ class FlaxBertForNextSentencePredictionModule(nn.Module):
1204
+ config: BertConfig
1205
+ dtype: jnp.dtype = jnp.float32
1206
+ gradient_checkpointing: bool = False
1207
+
1208
+ def setup(self):
1209
+ self.bert = FlaxBertModule(
1210
+ config=self.config,
1211
+ dtype=self.dtype,
1212
+ gradient_checkpointing=self.gradient_checkpointing,
1213
+ )
1214
+ self.cls = FlaxBertOnlyNSPHead(dtype=self.dtype)
1215
+
1216
+ def __call__(
1217
+ self,
1218
+ input_ids,
1219
+ attention_mask,
1220
+ token_type_ids,
1221
+ position_ids,
1222
+ head_mask,
1223
+ deterministic: bool = True,
1224
+ output_attentions: bool = False,
1225
+ output_hidden_states: bool = False,
1226
+ return_dict: bool = True,
1227
+ ):
1228
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1229
+
1230
+ # Model
1231
+ outputs = self.bert(
1232
+ input_ids,
1233
+ attention_mask,
1234
+ token_type_ids,
1235
+ position_ids,
1236
+ head_mask,
1237
+ deterministic=deterministic,
1238
+ output_attentions=output_attentions,
1239
+ output_hidden_states=output_hidden_states,
1240
+ return_dict=return_dict,
1241
+ )
1242
+
1243
+ pooled_output = outputs[1]
1244
+ seq_relationship_scores = self.cls(pooled_output)
1245
+
1246
+ if not return_dict:
1247
+ return (seq_relationship_scores,) + outputs[2:]
1248
+
1249
+ return FlaxNextSentencePredictorOutput(
1250
+ logits=seq_relationship_scores,
1251
+ hidden_states=outputs.hidden_states,
1252
+ attentions=outputs.attentions,
1253
+ )
1254
+
1255
+
1256
+ @add_start_docstrings(
1257
+ """Bert Model with a `next sentence prediction (classification)` head on top.""",
1258
+ BERT_START_DOCSTRING,
1259
+ )
1260
+ class FlaxBertForNextSentencePrediction(FlaxBertPreTrainedModel):
1261
+ module_class = FlaxBertForNextSentencePredictionModule
1262
+
1263
+
1264
+ FLAX_BERT_FOR_NEXT_SENT_PRED_DOCSTRING = """
1265
+ Returns:
1266
+
1267
+ Example:
1268
+
1269
+ ```python
1270
+ >>> from transformers import AutoTokenizer, FlaxBertForNextSentencePrediction
1271
+
1272
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
1273
+ >>> model = FlaxBertForNextSentencePrediction.from_pretrained("google-bert/bert-base-uncased")
1274
+
1275
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1276
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
1277
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="jax")
1278
+
1279
+ >>> outputs = model(**encoding)
1280
+ >>> logits = outputs.logits
1281
+ >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
1282
+ ```
1283
+ """
1284
+
1285
+
1286
+ overwrite_call_docstring(
1287
+ FlaxBertForNextSentencePrediction,
1288
+ BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + FLAX_BERT_FOR_NEXT_SENT_PRED_DOCSTRING,
1289
+ )
1290
+ append_replace_return_docstrings(
1291
+ FlaxBertForNextSentencePrediction, output_type=FlaxNextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC
1292
+ )
1293
+
1294
+
1295
+ class FlaxBertForSequenceClassificationModule(nn.Module):
1296
+ config: BertConfig
1297
+ dtype: jnp.dtype = jnp.float32
1298
+ gradient_checkpointing: bool = False
1299
+
1300
+ def setup(self):
1301
+ self.bert = FlaxBertModule(
1302
+ config=self.config,
1303
+ dtype=self.dtype,
1304
+ gradient_checkpointing=self.gradient_checkpointing,
1305
+ )
1306
+ classifier_dropout = (
1307
+ self.config.classifier_dropout
1308
+ if self.config.classifier_dropout is not None
1309
+ else self.config.hidden_dropout_prob
1310
+ )
1311
+ self.dropout = nn.Dropout(rate=classifier_dropout)
1312
+ self.classifier = nn.Dense(
1313
+ self.config.num_labels,
1314
+ dtype=self.dtype,
1315
+ )
1316
+
1317
+ def __call__(
1318
+ self,
1319
+ input_ids,
1320
+ attention_mask,
1321
+ token_type_ids,
1322
+ position_ids,
1323
+ head_mask,
1324
+ deterministic: bool = True,
1325
+ output_attentions: bool = False,
1326
+ output_hidden_states: bool = False,
1327
+ return_dict: bool = True,
1328
+ ):
1329
+ # Model
1330
+ outputs = self.bert(
1331
+ input_ids,
1332
+ attention_mask,
1333
+ token_type_ids,
1334
+ position_ids,
1335
+ head_mask,
1336
+ deterministic=deterministic,
1337
+ output_attentions=output_attentions,
1338
+ output_hidden_states=output_hidden_states,
1339
+ return_dict=return_dict,
1340
+ )
1341
+
1342
+ pooled_output = outputs[1]
1343
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
1344
+ logits = self.classifier(pooled_output)
1345
+
1346
+ if not return_dict:
1347
+ return (logits,) + outputs[2:]
1348
+
1349
+ return FlaxSequenceClassifierOutput(
1350
+ logits=logits,
1351
+ hidden_states=outputs.hidden_states,
1352
+ attentions=outputs.attentions,
1353
+ )
1354
+
1355
+
1356
+ @add_start_docstrings(
1357
+ """
1358
+ Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1359
+ output) e.g. for GLUE tasks.
1360
+ """,
1361
+ BERT_START_DOCSTRING,
1362
+ )
1363
+ class FlaxBertForSequenceClassification(FlaxBertPreTrainedModel):
1364
+ module_class = FlaxBertForSequenceClassificationModule
1365
+
1366
+
1367
+ append_call_sample_docstring(
1368
+ FlaxBertForSequenceClassification,
1369
+ _CHECKPOINT_FOR_DOC,
1370
+ FlaxSequenceClassifierOutput,
1371
+ _CONFIG_FOR_DOC,
1372
+ )
1373
+
1374
+
1375
+ class FlaxBertForMultipleChoiceModule(nn.Module):
1376
+ config: BertConfig
1377
+ dtype: jnp.dtype = jnp.float32
1378
+ gradient_checkpointing: bool = False
1379
+
1380
+ def setup(self):
1381
+ self.bert = FlaxBertModule(
1382
+ config=self.config,
1383
+ dtype=self.dtype,
1384
+ gradient_checkpointing=self.gradient_checkpointing,
1385
+ )
1386
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
1387
+ self.classifier = nn.Dense(1, dtype=self.dtype)
1388
+
1389
+ def __call__(
1390
+ self,
1391
+ input_ids,
1392
+ attention_mask,
1393
+ token_type_ids,
1394
+ position_ids,
1395
+ head_mask,
1396
+ deterministic: bool = True,
1397
+ output_attentions: bool = False,
1398
+ output_hidden_states: bool = False,
1399
+ return_dict: bool = True,
1400
+ ):
1401
+ num_choices = input_ids.shape[1]
1402
+ input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
1403
+ attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
1404
+ token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
1405
+ position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None
1406
+
1407
+ # Model
1408
+ outputs = self.bert(
1409
+ input_ids,
1410
+ attention_mask,
1411
+ token_type_ids,
1412
+ position_ids,
1413
+ head_mask,
1414
+ deterministic=deterministic,
1415
+ output_attentions=output_attentions,
1416
+ output_hidden_states=output_hidden_states,
1417
+ return_dict=return_dict,
1418
+ )
1419
+
1420
+ pooled_output = outputs[1]
1421
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
1422
+ logits = self.classifier(pooled_output)
1423
+
1424
+ reshaped_logits = logits.reshape(-1, num_choices)
1425
+
1426
+ if not return_dict:
1427
+ return (reshaped_logits,) + outputs[2:]
1428
+
1429
+ return FlaxMultipleChoiceModelOutput(
1430
+ logits=reshaped_logits,
1431
+ hidden_states=outputs.hidden_states,
1432
+ attentions=outputs.attentions,
1433
+ )
1434
+
1435
+
1436
+ @add_start_docstrings(
1437
+ """
1438
+ Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1439
+ softmax) e.g. for RocStories/SWAG tasks.
1440
+ """,
1441
+ BERT_START_DOCSTRING,
1442
+ )
1443
+ class FlaxBertForMultipleChoice(FlaxBertPreTrainedModel):
1444
+ module_class = FlaxBertForMultipleChoiceModule
1445
+
1446
+
1447
+ overwrite_call_docstring(
1448
+ FlaxBertForMultipleChoice, BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1449
+ )
1450
+ append_call_sample_docstring(
1451
+ FlaxBertForMultipleChoice, _CHECKPOINT_FOR_DOC, FlaxMultipleChoiceModelOutput, _CONFIG_FOR_DOC
1452
+ )
1453
+
1454
+
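An illustrative aside, not part of the upstream file: a small sketch of the reshaping that the multiple-choice module above performs, using dummy tensors so the shape bookkeeping is visible (the values carry no meaning).

import jax.numpy as jnp

batch_size, num_choices, seq_len = 2, 4, 8
input_ids = jnp.ones((batch_size, num_choices, seq_len), dtype=jnp.int32)

# The module flattens the choice dimension before running the shared BERT encoder ...
flat_input_ids = input_ids.reshape(-1, input_ids.shape[-1])        # (8, 8)
# ... scores each flattened example with a Dense(1) head ...
per_example_scores = jnp.zeros((batch_size * num_choices, 1))
# ... and folds the scores back so each row holds one score per choice.
reshaped_logits = per_example_scores.reshape(-1, num_choices)      # (2, 4)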
1455
+ class FlaxBertForTokenClassificationModule(nn.Module):
1456
+ config: BertConfig
1457
+ dtype: jnp.dtype = jnp.float32
1458
+ gradient_checkpointing: bool = False
1459
+
1460
+ def setup(self):
1461
+ self.bert = FlaxBertModule(
1462
+ config=self.config,
1463
+ dtype=self.dtype,
1464
+ add_pooling_layer=False,
1465
+ gradient_checkpointing=self.gradient_checkpointing,
1466
+ )
1467
+ classifier_dropout = (
1468
+ self.config.classifier_dropout
1469
+ if self.config.classifier_dropout is not None
1470
+ else self.config.hidden_dropout_prob
1471
+ )
1472
+ self.dropout = nn.Dropout(rate=classifier_dropout)
1473
+ self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
1474
+
1475
+ def __call__(
1476
+ self,
1477
+ input_ids,
1478
+ attention_mask,
1479
+ token_type_ids,
1480
+ position_ids,
1481
+ head_mask,
1482
+ deterministic: bool = True,
1483
+ output_attentions: bool = False,
1484
+ output_hidden_states: bool = False,
1485
+ return_dict: bool = True,
1486
+ ):
1487
+ # Model
1488
+ outputs = self.bert(
1489
+ input_ids,
1490
+ attention_mask,
1491
+ token_type_ids,
1492
+ position_ids,
1493
+ head_mask,
1494
+ deterministic=deterministic,
1495
+ output_attentions=output_attentions,
1496
+ output_hidden_states=output_hidden_states,
1497
+ return_dict=return_dict,
1498
+ )
1499
+
1500
+ hidden_states = outputs[0]
1501
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
1502
+ logits = self.classifier(hidden_states)
1503
+
1504
+ if not return_dict:
1505
+ return (logits,) + outputs[1:]
1506
+
1507
+ return FlaxTokenClassifierOutput(
1508
+ logits=logits,
1509
+ hidden_states=outputs.hidden_states,
1510
+ attentions=outputs.attentions,
1511
+ )
1512
+
1513
+
1514
+ @add_start_docstrings(
1515
+ """
1516
+ Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1517
+ Named-Entity-Recognition (NER) tasks.
1518
+ """,
1519
+ BERT_START_DOCSTRING,
1520
+ )
1521
+ class FlaxBertForTokenClassification(FlaxBertPreTrainedModel):
1522
+ module_class = FlaxBertForTokenClassificationModule
1523
+
1524
+
1525
+ append_call_sample_docstring(
1526
+ FlaxBertForTokenClassification, _CHECKPOINT_FOR_DOC, FlaxTokenClassifierOutput, _CONFIG_FOR_DOC
1527
+ )
1528
+
1529
+
1530
+ class FlaxBertForQuestionAnsweringModule(nn.Module):
1531
+ config: BertConfig
1532
+ dtype: jnp.dtype = jnp.float32
1533
+ gradient_checkpointing: bool = False
1534
+
1535
+ def setup(self):
1536
+ self.bert = FlaxBertModule(
1537
+ config=self.config,
1538
+ dtype=self.dtype,
1539
+ add_pooling_layer=False,
1540
+ gradient_checkpointing=self.gradient_checkpointing,
1541
+ )
1542
+ self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
1543
+
1544
+ def __call__(
1545
+ self,
1546
+ input_ids,
1547
+ attention_mask,
1548
+ token_type_ids,
1549
+ position_ids,
1550
+ head_mask,
1551
+ deterministic: bool = True,
1552
+ output_attentions: bool = False,
1553
+ output_hidden_states: bool = False,
1554
+ return_dict: bool = True,
1555
+ ):
1556
+ # Model
1557
+ outputs = self.bert(
1558
+ input_ids,
1559
+ attention_mask,
1560
+ token_type_ids,
1561
+ position_ids,
1562
+ head_mask,
1563
+ deterministic=deterministic,
1564
+ output_attentions=output_attentions,
1565
+ output_hidden_states=output_hidden_states,
1566
+ return_dict=return_dict,
1567
+ )
1568
+
1569
+ hidden_states = outputs[0]
1570
+
1571
+ logits = self.qa_outputs(hidden_states)
1572
+ start_logits, end_logits = jnp.split(logits, self.config.num_labels, axis=-1)
1573
+ start_logits = start_logits.squeeze(-1)
1574
+ end_logits = end_logits.squeeze(-1)
1575
+
1576
+ if not return_dict:
1577
+ return (start_logits, end_logits) + outputs[1:]
1578
+
1579
+ return FlaxQuestionAnsweringModelOutput(
1580
+ start_logits=start_logits,
1581
+ end_logits=end_logits,
1582
+ hidden_states=outputs.hidden_states,
1583
+ attentions=outputs.attentions,
1584
+ )
1585
+
1586
+
1587
+ @add_start_docstrings(
1588
+ """
1589
+ Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1590
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1591
+ """,
1592
+ BERT_START_DOCSTRING,
1593
+ )
1594
+ class FlaxBertForQuestionAnswering(FlaxBertPreTrainedModel):
1595
+ module_class = FlaxBertForQuestionAnsweringModule
1596
+
1597
+
1598
+ append_call_sample_docstring(
1599
+ FlaxBertForQuestionAnswering,
1600
+ _CHECKPOINT_FOR_DOC,
1601
+ FlaxQuestionAnsweringModelOutput,
1602
+ _CONFIG_FOR_DOC,
1603
+ )
1604
+
1605
+
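An illustrative aside, not part of the upstream file: the start/end split performed by the question-answering module above, replayed on a zero-filled dummy tensor with the default `num_labels` of 2.

import jax.numpy as jnp

batch_size, seq_len, num_labels = 1, 6, 2
qa_logits = jnp.zeros((batch_size, seq_len, num_labels))

start_logits, end_logits = jnp.split(qa_logits, num_labels, axis=-1)
start_logits = start_logits.squeeze(-1)    # (1, 6): score of each token as the answer start
end_logits = end_logits.squeeze(-1)        # (1, 6): score of each token as the answer end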
1606
+ class FlaxBertForCausalLMModule(nn.Module):
1607
+ config: BertConfig
1608
+ dtype: jnp.dtype = jnp.float32
1609
+ gradient_checkpointing: bool = False
1610
+
1611
+ def setup(self):
1612
+ self.bert = FlaxBertModule(
1613
+ config=self.config,
1614
+ add_pooling_layer=False,
1615
+ dtype=self.dtype,
1616
+ gradient_checkpointing=self.gradient_checkpointing,
1617
+ )
1618
+ self.cls = FlaxBertOnlyMLMHead(config=self.config, dtype=self.dtype)
1619
+
1620
+ def __call__(
1621
+ self,
1622
+ input_ids,
1623
+ attention_mask,
1624
+ position_ids,
1625
+ token_type_ids: Optional[jnp.ndarray] = None,
1626
+ head_mask: Optional[jnp.ndarray] = None,
1627
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
1628
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1629
+ init_cache: bool = False,
1630
+ deterministic: bool = True,
1631
+ output_attentions: bool = False,
1632
+ output_hidden_states: bool = False,
1633
+ return_dict: bool = True,
1634
+ ):
1635
+ # Model
1636
+ outputs = self.bert(
1637
+ input_ids,
1638
+ attention_mask,
1639
+ token_type_ids,
1640
+ position_ids,
1641
+ head_mask,
1642
+ encoder_hidden_states=encoder_hidden_states,
1643
+ encoder_attention_mask=encoder_attention_mask,
1644
+ init_cache=init_cache,
1645
+ deterministic=deterministic,
1646
+ output_attentions=output_attentions,
1647
+ output_hidden_states=output_hidden_states,
1648
+ return_dict=return_dict,
1649
+ )
1650
+
1651
+ hidden_states = outputs[0]
1652
+ if self.config.tie_word_embeddings:
1653
+ shared_embedding = self.bert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
1654
+ else:
1655
+ shared_embedding = None
1656
+
1657
+ # Compute the prediction scores
1658
+ logits = self.cls(hidden_states, shared_embedding=shared_embedding)
1659
+
1660
+ if not return_dict:
1661
+ return (logits,) + outputs[1:]
1662
+
1663
+ return FlaxCausalLMOutputWithCrossAttentions(
1664
+ logits=logits,
1665
+ hidden_states=outputs.hidden_states,
1666
+ attentions=outputs.attentions,
1667
+ cross_attentions=outputs.cross_attentions,
1668
+ )
1669
+
1670
+
1671
+ @add_start_docstrings(
1672
+ """
1673
+ Bert Model with a language modeling head on top (a linear layer on top of the hidden-states output) e.g. for
1674
+ autoregressive tasks.
1675
+ """,
1676
+ BERT_START_DOCSTRING,
1677
+ )
1678
+ class FlaxBertForCausalLM(FlaxBertPreTrainedModel):
1679
+ module_class = FlaxBertForCausalLMModule
1680
+
1681
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
1682
+ # initializing the cache
1683
+ batch_size, seq_length = input_ids.shape
1684
+
1685
+ past_key_values = self.init_cache(batch_size, max_length)
1686
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
1687
+ # But since the decoder uses a causal mask, those positions are masked anyway.
1688
+ # Thus, we can create a single static attention_mask here, which is more efficient for compilation
1689
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
1690
+ if attention_mask is not None:
1691
+ position_ids = attention_mask.cumsum(axis=-1) - 1
1692
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
1693
+ else:
1694
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
1695
+
1696
+ return {
1697
+ "past_key_values": past_key_values,
1698
+ "attention_mask": extended_attention_mask,
1699
+ "position_ids": position_ids,
1700
+ }
1701
+
1702
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
1703
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
1704
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
1705
+ return model_kwargs
1706
+
1707
+
1708
+ append_call_sample_docstring(
1709
+ FlaxBertForCausalLM,
1710
+ _CHECKPOINT_FOR_DOC,
1711
+ FlaxCausalLMOutputWithCrossAttentions,
1712
+ _CONFIG_FOR_DOC,
1713
+ )
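An illustrative aside, not part of the upstream file: a sketch of greedy generation with the causal-LM head above. It assumes the `google-bert/bert-base-uncased` checkpoint loaded with `is_decoder=True`; BERT is not pretrained autoregressively, so the continuation only demonstrates the API, not fluent text.

from transformers import AutoTokenizer, FlaxBertForCausalLM

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = FlaxBertForCausalLM.from_pretrained("google-bert/bert-base-uncased", is_decoder=True)

inputs = tokenizer("Hello, my dog is", return_tensors="jax")
outputs = model.generate(inputs["input_ids"], max_length=16)
print(tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True))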
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/modeling_tf_bert.py ADDED
@@ -0,0 +1,2114 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ TF 2.0 BERT model."""
17
+
18
+
19
+ from __future__ import annotations
20
+
21
+ import math
22
+ import warnings
23
+ from dataclasses import dataclass
24
+ from typing import Dict, Optional, Tuple, Union
25
+
26
+ import numpy as np
27
+ import tensorflow as tf
28
+
29
+ from ...activations_tf import get_tf_activation
30
+ from ...modeling_tf_outputs import (
31
+ TFBaseModelOutputWithPastAndCrossAttentions,
32
+ TFBaseModelOutputWithPoolingAndCrossAttentions,
33
+ TFCausalLMOutputWithCrossAttentions,
34
+ TFMaskedLMOutput,
35
+ TFMultipleChoiceModelOutput,
36
+ TFNextSentencePredictorOutput,
37
+ TFQuestionAnsweringModelOutput,
38
+ TFSequenceClassifierOutput,
39
+ TFTokenClassifierOutput,
40
+ )
41
+ from ...modeling_tf_utils import (
42
+ TFCausalLanguageModelingLoss,
43
+ TFMaskedLanguageModelingLoss,
44
+ TFModelInputType,
45
+ TFMultipleChoiceLoss,
46
+ TFNextSentencePredictionLoss,
47
+ TFPreTrainedModel,
48
+ TFQuestionAnsweringLoss,
49
+ TFSequenceClassificationLoss,
50
+ TFTokenClassificationLoss,
51
+ get_initializer,
52
+ keras,
53
+ keras_serializable,
54
+ unpack_inputs,
55
+ )
56
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
57
+ from ...utils import (
58
+ ModelOutput,
59
+ add_code_sample_docstrings,
60
+ add_start_docstrings,
61
+ add_start_docstrings_to_model_forward,
62
+ logging,
63
+ replace_return_docstrings,
64
+ )
65
+ from .configuration_bert import BertConfig
66
+
67
+
68
+ logger = logging.get_logger(__name__)
69
+
70
+ _CHECKPOINT_FOR_DOC = "google-bert/bert-base-uncased"
71
+ _CONFIG_FOR_DOC = "BertConfig"
72
+
73
+ # TokenClassification docstring
74
+ _CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "dbmdz/bert-large-cased-finetuned-conll03-english"
75
+ _TOKEN_CLASS_EXPECTED_OUTPUT = (
76
+ "['O', 'I-ORG', 'I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC'] "
77
+ )
78
+ _TOKEN_CLASS_EXPECTED_LOSS = 0.01
79
+
80
+ # QuestionAnswering docstring
81
+ _CHECKPOINT_FOR_QA = "ydshieh/bert-base-cased-squad2"
82
+ _QA_EXPECTED_OUTPUT = "'a nice puppet'"
83
+ _QA_EXPECTED_LOSS = 7.41
84
+ _QA_TARGET_START_INDEX = 14
85
+ _QA_TARGET_END_INDEX = 15
86
+
87
+ # SequenceClassification docstring
88
+ _CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "ydshieh/bert-base-uncased-yelp-polarity"
89
+ _SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_1'"
90
+ _SEQ_CLASS_EXPECTED_LOSS = 0.01
91
+
92
+
93
+ from ..deprecated._archive_maps import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
94
+
95
+
96
+ class TFBertPreTrainingLoss:
97
+ """
98
+ Loss function suitable for BERT-like pretraining, that is, the task of pretraining a language model by combining
99
+ NSP + MLM. Note: any label of -100 will be ignored (along with the corresponding logits) in the loss
100
+ computation.
101
+ """
102
+
103
+ def hf_compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:
104
+ loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE)
105
+
106
+ # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway
107
+ unmasked_lm_losses = loss_fn(y_true=tf.nn.relu(labels["labels"]), y_pred=logits[0])
108
+ # make sure only labels that are not equal to -100
109
+ # are taken into account for the loss computation
110
+ lm_loss_mask = tf.cast(labels["labels"] != -100, dtype=unmasked_lm_losses.dtype)
111
+ masked_lm_losses = unmasked_lm_losses * lm_loss_mask
112
+ reduced_masked_lm_loss = tf.reduce_sum(masked_lm_losses) / tf.reduce_sum(lm_loss_mask)
113
+
114
+ # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway
115
+ unmasked_ns_loss = loss_fn(y_true=tf.nn.relu(labels["next_sentence_label"]), y_pred=logits[1])
116
+ ns_loss_mask = tf.cast(labels["next_sentence_label"] != -100, dtype=unmasked_ns_loss.dtype)
117
+ masked_ns_loss = unmasked_ns_loss * ns_loss_mask
118
+
119
+ reduced_masked_ns_loss = tf.reduce_sum(masked_ns_loss) / tf.reduce_sum(ns_loss_mask)
120
+
121
+ return tf.reshape(reduced_masked_lm_loss + reduced_masked_ns_loss, (1,))
122
+
123
+
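An illustrative aside, not part of the upstream file: the -100 masking convention that `TFBertPreTrainingLoss` above relies on, replayed on hand-made numbers so the averaging is easy to follow.

import tensorflow as tf

labels = tf.constant([[-100, 7, -100, 42]])              # -100 marks positions to ignore
per_token_loss = tf.constant([[0.9, 1.2, 3.4, 0.5]])     # pretend per-token cross-entropy values
mask = tf.cast(labels != -100, per_token_loss.dtype)     # 1.0 wherever a real label exists
mean_loss = tf.reduce_sum(per_token_loss * mask) / tf.reduce_sum(mask)  # (1.2 + 0.5) / 2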
124
+ class TFBertEmbeddings(keras.layers.Layer):
125
+ """Construct the embeddings from word, position and token_type embeddings."""
126
+
127
+ def __init__(self, config: BertConfig, **kwargs):
128
+ super().__init__(**kwargs)
129
+
130
+ self.config = config
131
+ self.hidden_size = config.hidden_size
132
+ self.max_position_embeddings = config.max_position_embeddings
133
+ self.initializer_range = config.initializer_range
134
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
135
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
136
+
137
+ def build(self, input_shape=None):
138
+ with tf.name_scope("word_embeddings"):
139
+ self.weight = self.add_weight(
140
+ name="weight",
141
+ shape=[self.config.vocab_size, self.hidden_size],
142
+ initializer=get_initializer(self.initializer_range),
143
+ )
144
+
145
+ with tf.name_scope("token_type_embeddings"):
146
+ self.token_type_embeddings = self.add_weight(
147
+ name="embeddings",
148
+ shape=[self.config.type_vocab_size, self.hidden_size],
149
+ initializer=get_initializer(self.initializer_range),
150
+ )
151
+
152
+ with tf.name_scope("position_embeddings"):
153
+ self.position_embeddings = self.add_weight(
154
+ name="embeddings",
155
+ shape=[self.max_position_embeddings, self.hidden_size],
156
+ initializer=get_initializer(self.initializer_range),
157
+ )
158
+
159
+ if self.built:
160
+ return
161
+ self.built = True
162
+ if getattr(self, "LayerNorm", None) is not None:
163
+ with tf.name_scope(self.LayerNorm.name):
164
+ self.LayerNorm.build([None, None, self.config.hidden_size])
165
+
166
+ def call(
167
+ self,
168
+ input_ids: tf.Tensor = None,
169
+ position_ids: tf.Tensor = None,
170
+ token_type_ids: tf.Tensor = None,
171
+ inputs_embeds: tf.Tensor = None,
172
+ past_key_values_length=0,
173
+ training: bool = False,
174
+ ) -> tf.Tensor:
175
+ """
176
+ Applies embedding based on inputs tensor.
177
+
178
+ Returns:
179
+ final_embeddings (`tf.Tensor`): output embedding tensor.
180
+ """
181
+ if input_ids is None and inputs_embeds is None:
182
+ raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
183
+
184
+ if input_ids is not None:
185
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
186
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
187
+
188
+ input_shape = shape_list(inputs_embeds)[:-1]
189
+
190
+ if token_type_ids is None:
191
+ token_type_ids = tf.fill(dims=input_shape, value=0)
192
+
193
+ if position_ids is None:
194
+ position_ids = tf.expand_dims(
195
+ tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
196
+ )
197
+
198
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
199
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
200
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
201
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
202
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
203
+
204
+ return final_embeddings
205
+
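An illustrative aside, not part of the upstream file: the three-way embedding sum computed by `TFBertEmbeddings.call` above, on tiny made-up tables (the real tables are `vocab_size x hidden_size`, `max_position_embeddings x hidden_size` and `type_vocab_size x hidden_size`).

import tensorflow as tf

hidden_size = 4
word_table = tf.random.normal((10, hidden_size))       # stands in for the word embeddings
position_table = tf.random.normal((8, hidden_size))    # stands in for the position embeddings
type_table = tf.random.normal((2, hidden_size))        # stands in for the token-type embeddings

input_ids = tf.constant([[3, 1, 7]])
position_ids = tf.range(3)[None, :]
token_type_ids = tf.zeros_like(input_ids)

embeddings = (
    tf.gather(word_table, input_ids)
    + tf.gather(position_table, position_ids)
    + tf.gather(type_table, token_type_ids)
)  # (1, 3, 4); the real layer then applies LayerNorm and dropout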
206
+
207
+ class TFBertSelfAttention(keras.layers.Layer):
208
+ def __init__(self, config: BertConfig, **kwargs):
209
+ super().__init__(**kwargs)
210
+
211
+ if config.hidden_size % config.num_attention_heads != 0:
212
+ raise ValueError(
213
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number "
214
+ f"of attention heads ({config.num_attention_heads})"
215
+ )
216
+
217
+ self.num_attention_heads = config.num_attention_heads
218
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
219
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
220
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
221
+
222
+ self.query = keras.layers.Dense(
223
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
224
+ )
225
+ self.key = keras.layers.Dense(
226
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
227
+ )
228
+ self.value = keras.layers.Dense(
229
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
230
+ )
231
+ self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
232
+
233
+ self.is_decoder = config.is_decoder
234
+ self.config = config
235
+
236
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
237
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
238
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
239
+
240
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
241
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
242
+
243
+ def call(
244
+ self,
245
+ hidden_states: tf.Tensor,
246
+ attention_mask: tf.Tensor,
247
+ head_mask: tf.Tensor,
248
+ encoder_hidden_states: tf.Tensor,
249
+ encoder_attention_mask: tf.Tensor,
250
+ past_key_value: Tuple[tf.Tensor],
251
+ output_attentions: bool,
252
+ training: bool = False,
253
+ ) -> Tuple[tf.Tensor]:
254
+ batch_size = shape_list(hidden_states)[0]
255
+ mixed_query_layer = self.query(inputs=hidden_states)
256
+
257
+ # If this is instantiated as a cross-attention module, the keys
258
+ # and values come from an encoder; the attention mask needs to be
259
+ # such that the encoder's padding tokens are not attended to.
260
+ is_cross_attention = encoder_hidden_states is not None
261
+
262
+ if is_cross_attention and past_key_value is not None:
263
+ # reuse k,v, cross_attentions
264
+ key_layer = past_key_value[0]
265
+ value_layer = past_key_value[1]
266
+ attention_mask = encoder_attention_mask
267
+ elif is_cross_attention:
268
+ key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size)
269
+ value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size)
270
+ attention_mask = encoder_attention_mask
271
+ elif past_key_value is not None:
272
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
273
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
274
+ key_layer = tf.concat([past_key_value[0], key_layer], axis=2)
275
+ value_layer = tf.concat([past_key_value[1], value_layer], axis=2)
276
+ else:
277
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
278
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
279
+
280
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
281
+
282
+ if self.is_decoder:
283
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
284
+ # Further calls to cross_attention layer can then reuse all cross-attention
285
+ # key/value_states (first "if" case)
286
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
287
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
288
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
289
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
290
+ past_key_value = (key_layer, value_layer)
291
+
292
+ # Take the dot product between "query" and "key" to get the raw attention scores.
293
+ # (batch size, num_heads, seq_len_q, seq_len_k)
294
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
295
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
296
+ attention_scores = tf.divide(attention_scores, dk)
297
+
298
+ if attention_mask is not None:
299
+ # Apply the attention mask is (precomputed for all layers in TFBertModel call() function)
300
+ attention_scores = tf.add(attention_scores, attention_mask)
301
+
302
+ # Normalize the attention scores to probabilities.
303
+ attention_probs = stable_softmax(logits=attention_scores, axis=-1)
304
+
305
+ # This is actually dropping out entire tokens to attend to, which might
306
+ # seem a bit unusual, but is taken from the original Transformer paper.
307
+ attention_probs = self.dropout(inputs=attention_probs, training=training)
308
+
309
+ # Mask heads if we want to
310
+ if head_mask is not None:
311
+ attention_probs = tf.multiply(attention_probs, head_mask)
312
+
313
+ attention_output = tf.matmul(attention_probs, value_layer)
314
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
315
+
316
+ # (batch_size, seq_len_q, all_head_size)
317
+ attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
318
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
319
+
320
+ if self.is_decoder:
321
+ outputs = outputs + (past_key_value,)
322
+ return outputs
323
+
324
+ def build(self, input_shape=None):
325
+ if self.built:
326
+ return
327
+ self.built = True
328
+ if getattr(self, "query", None) is not None:
329
+ with tf.name_scope(self.query.name):
330
+ self.query.build([None, None, self.config.hidden_size])
331
+ if getattr(self, "key", None) is not None:
332
+ with tf.name_scope(self.key.name):
333
+ self.key.build([None, None, self.config.hidden_size])
334
+ if getattr(self, "value", None) is not None:
335
+ with tf.name_scope(self.value.name):
336
+ self.value.build([None, None, self.config.hidden_size])
337
+
338
+
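An illustrative aside, not part of the upstream file: what `transpose_for_scores` above does to a dummy tensor, with 2 attention heads of size 4.

import tensorflow as tf

batch_size, seq_len, num_heads, head_size = 1, 3, 2, 4
hidden = tf.zeros((batch_size, seq_len, num_heads * head_size))       # (1, 3, 8)
split = tf.reshape(hidden, (batch_size, -1, num_heads, head_size))    # (1, 3, 2, 4)
per_head = tf.transpose(split, perm=[0, 2, 1, 3])                     # (1, 2, 3, 4)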
339
+ class TFBertSelfOutput(keras.layers.Layer):
340
+ def __init__(self, config: BertConfig, **kwargs):
341
+ super().__init__(**kwargs)
342
+
343
+ self.dense = keras.layers.Dense(
344
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
345
+ )
346
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
347
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
348
+ self.config = config
349
+
350
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
351
+ hidden_states = self.dense(inputs=hidden_states)
352
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
353
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
354
+
355
+ return hidden_states
356
+
357
+ def build(self, input_shape=None):
358
+ if self.built:
359
+ return
360
+ self.built = True
361
+ if getattr(self, "dense", None) is not None:
362
+ with tf.name_scope(self.dense.name):
363
+ self.dense.build([None, None, self.config.hidden_size])
364
+ if getattr(self, "LayerNorm", None) is not None:
365
+ with tf.name_scope(self.LayerNorm.name):
366
+ self.LayerNorm.build([None, None, self.config.hidden_size])
367
+
368
+
369
+ class TFBertAttention(keras.layers.Layer):
370
+ def __init__(self, config: BertConfig, **kwargs):
371
+ super().__init__(**kwargs)
372
+
373
+ self.self_attention = TFBertSelfAttention(config, name="self")
374
+ self.dense_output = TFBertSelfOutput(config, name="output")
375
+
376
+ def prune_heads(self, heads):
377
+ raise NotImplementedError
378
+
379
+ def call(
380
+ self,
381
+ input_tensor: tf.Tensor,
382
+ attention_mask: tf.Tensor,
383
+ head_mask: tf.Tensor,
384
+ encoder_hidden_states: tf.Tensor,
385
+ encoder_attention_mask: tf.Tensor,
386
+ past_key_value: Tuple[tf.Tensor],
387
+ output_attentions: bool,
388
+ training: bool = False,
389
+ ) -> Tuple[tf.Tensor]:
390
+ self_outputs = self.self_attention(
391
+ hidden_states=input_tensor,
392
+ attention_mask=attention_mask,
393
+ head_mask=head_mask,
394
+ encoder_hidden_states=encoder_hidden_states,
395
+ encoder_attention_mask=encoder_attention_mask,
396
+ past_key_value=past_key_value,
397
+ output_attentions=output_attentions,
398
+ training=training,
399
+ )
400
+ attention_output = self.dense_output(
401
+ hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
402
+ )
403
+ # add attentions (possibly with past_key_value) if we output them
404
+ outputs = (attention_output,) + self_outputs[1:]
405
+
406
+ return outputs
407
+
408
+ def build(self, input_shape=None):
409
+ if self.built:
410
+ return
411
+ self.built = True
412
+ if getattr(self, "self_attention", None) is not None:
413
+ with tf.name_scope(self.self_attention.name):
414
+ self.self_attention.build(None)
415
+ if getattr(self, "dense_output", None) is not None:
416
+ with tf.name_scope(self.dense_output.name):
417
+ self.dense_output.build(None)
418
+
419
+
420
+ class TFBertIntermediate(keras.layers.Layer):
421
+ def __init__(self, config: BertConfig, **kwargs):
422
+ super().__init__(**kwargs)
423
+
424
+ self.dense = keras.layers.Dense(
425
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
426
+ )
427
+
428
+ if isinstance(config.hidden_act, str):
429
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
430
+ else:
431
+ self.intermediate_act_fn = config.hidden_act
432
+ self.config = config
433
+
434
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
435
+ hidden_states = self.dense(inputs=hidden_states)
436
+ hidden_states = self.intermediate_act_fn(hidden_states)
437
+
438
+ return hidden_states
439
+
440
+ def build(self, input_shape=None):
441
+ if self.built:
442
+ return
443
+ self.built = True
444
+ if getattr(self, "dense", None) is not None:
445
+ with tf.name_scope(self.dense.name):
446
+ self.dense.build([None, None, self.config.hidden_size])
447
+
448
+
449
+ class TFBertOutput(keras.layers.Layer):
450
+ def __init__(self, config: BertConfig, **kwargs):
451
+ super().__init__(**kwargs)
452
+
453
+ self.dense = keras.layers.Dense(
454
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
455
+ )
456
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
457
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
458
+ self.config = config
459
+
460
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
461
+ hidden_states = self.dense(inputs=hidden_states)
462
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
463
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
464
+
465
+ return hidden_states
466
+
467
+ def build(self, input_shape=None):
468
+ if self.built:
469
+ return
470
+ self.built = True
471
+ if getattr(self, "dense", None) is not None:
472
+ with tf.name_scope(self.dense.name):
473
+ self.dense.build([None, None, self.config.intermediate_size])
474
+ if getattr(self, "LayerNorm", None) is not None:
475
+ with tf.name_scope(self.LayerNorm.name):
476
+ self.LayerNorm.build([None, None, self.config.hidden_size])
477
+
478
+
479
+ class TFBertLayer(keras.layers.Layer):
480
+ def __init__(self, config: BertConfig, **kwargs):
481
+ super().__init__(**kwargs)
482
+
483
+ self.attention = TFBertAttention(config, name="attention")
484
+ self.is_decoder = config.is_decoder
485
+ self.add_cross_attention = config.add_cross_attention
486
+ if self.add_cross_attention:
487
+ if not self.is_decoder:
488
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
489
+ self.crossattention = TFBertAttention(config, name="crossattention")
490
+ self.intermediate = TFBertIntermediate(config, name="intermediate")
491
+ self.bert_output = TFBertOutput(config, name="output")
492
+
493
+ def call(
494
+ self,
495
+ hidden_states: tf.Tensor,
496
+ attention_mask: tf.Tensor,
497
+ head_mask: tf.Tensor,
498
+ encoder_hidden_states: tf.Tensor | None,
499
+ encoder_attention_mask: tf.Tensor | None,
500
+ past_key_value: Tuple[tf.Tensor] | None,
501
+ output_attentions: bool,
502
+ training: bool = False,
503
+ ) -> Tuple[tf.Tensor]:
504
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
505
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
506
+ self_attention_outputs = self.attention(
507
+ input_tensor=hidden_states,
508
+ attention_mask=attention_mask,
509
+ head_mask=head_mask,
510
+ encoder_hidden_states=None,
511
+ encoder_attention_mask=None,
512
+ past_key_value=self_attn_past_key_value,
513
+ output_attentions=output_attentions,
514
+ training=training,
515
+ )
516
+ attention_output = self_attention_outputs[0]
517
+
518
+ # if decoder, the last output is tuple of self-attn cache
519
+ if self.is_decoder:
520
+ outputs = self_attention_outputs[1:-1]
521
+ present_key_value = self_attention_outputs[-1]
522
+ else:
523
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
524
+
525
+ cross_attn_present_key_value = None
526
+ if self.is_decoder and encoder_hidden_states is not None:
527
+ if not hasattr(self, "crossattention"):
528
+ raise ValueError(
529
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
530
+ " by setting `config.add_cross_attention=True`"
531
+ )
532
+
533
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
534
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
535
+ cross_attention_outputs = self.crossattention(
536
+ input_tensor=attention_output,
537
+ attention_mask=attention_mask,
538
+ head_mask=head_mask,
539
+ encoder_hidden_states=encoder_hidden_states,
540
+ encoder_attention_mask=encoder_attention_mask,
541
+ past_key_value=cross_attn_past_key_value,
542
+ output_attentions=output_attentions,
543
+ training=training,
544
+ )
545
+ attention_output = cross_attention_outputs[0]
546
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
547
+
548
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
549
+ cross_attn_present_key_value = cross_attention_outputs[-1]
550
+ present_key_value = present_key_value + cross_attn_present_key_value
551
+
552
+ intermediate_output = self.intermediate(hidden_states=attention_output)
553
+ layer_output = self.bert_output(
554
+ hidden_states=intermediate_output, input_tensor=attention_output, training=training
555
+ )
556
+ outputs = (layer_output,) + outputs # add attentions if we output them
557
+
558
+ # if decoder, return the attn key/values as the last output
559
+ if self.is_decoder:
560
+ outputs = outputs + (present_key_value,)
561
+
562
+ return outputs
563
+
564
+ def build(self, input_shape=None):
565
+ if self.built:
566
+ return
567
+ self.built = True
568
+ if getattr(self, "attention", None) is not None:
569
+ with tf.name_scope(self.attention.name):
570
+ self.attention.build(None)
571
+ if getattr(self, "intermediate", None) is not None:
572
+ with tf.name_scope(self.intermediate.name):
573
+ self.intermediate.build(None)
574
+ if getattr(self, "bert_output", None) is not None:
575
+ with tf.name_scope(self.bert_output.name):
576
+ self.bert_output.build(None)
577
+ if getattr(self, "crossattention", None) is not None:
578
+ with tf.name_scope(self.crossattention.name):
579
+ self.crossattention.build(None)
580
+
581
+
582
+ class TFBertEncoder(keras.layers.Layer):
583
+ def __init__(self, config: BertConfig, **kwargs):
584
+ super().__init__(**kwargs)
585
+ self.config = config
586
+ self.layer = [TFBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
587
+
588
+ def call(
589
+ self,
590
+ hidden_states: tf.Tensor,
591
+ attention_mask: tf.Tensor,
592
+ head_mask: tf.Tensor,
593
+ encoder_hidden_states: tf.Tensor | None,
594
+ encoder_attention_mask: tf.Tensor | None,
595
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None,
596
+ use_cache: Optional[bool],
597
+ output_attentions: bool,
598
+ output_hidden_states: bool,
599
+ return_dict: bool,
600
+ training: bool = False,
601
+ ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
602
+ all_hidden_states = () if output_hidden_states else None
603
+ all_attentions = () if output_attentions else None
604
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
605
+
606
+ next_decoder_cache = () if use_cache else None
607
+ for i, layer_module in enumerate(self.layer):
608
+ if output_hidden_states:
609
+ all_hidden_states = all_hidden_states + (hidden_states,)
610
+
611
+ past_key_value = past_key_values[i] if past_key_values is not None else None
612
+
613
+ layer_outputs = layer_module(
614
+ hidden_states=hidden_states,
615
+ attention_mask=attention_mask,
616
+ head_mask=head_mask[i],
617
+ encoder_hidden_states=encoder_hidden_states,
618
+ encoder_attention_mask=encoder_attention_mask,
619
+ past_key_value=past_key_value,
620
+ output_attentions=output_attentions,
621
+ training=training,
622
+ )
623
+ hidden_states = layer_outputs[0]
624
+
625
+ if use_cache:
626
+ next_decoder_cache += (layer_outputs[-1],)
627
+
628
+ if output_attentions:
629
+ all_attentions = all_attentions + (layer_outputs[1],)
630
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
631
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
632
+
633
+ # Add last layer
634
+ if output_hidden_states:
635
+ all_hidden_states = all_hidden_states + (hidden_states,)
636
+
637
+ if not return_dict:
638
+ return tuple(
639
+ v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None
640
+ )
641
+
642
+ return TFBaseModelOutputWithPastAndCrossAttentions(
643
+ last_hidden_state=hidden_states,
644
+ past_key_values=next_decoder_cache,
645
+ hidden_states=all_hidden_states,
646
+ attentions=all_attentions,
647
+ cross_attentions=all_cross_attentions,
648
+ )
649
+
650
+ def build(self, input_shape=None):
651
+ if self.built:
652
+ return
653
+ self.built = True
654
+ if getattr(self, "layer", None) is not None:
655
+ for layer in self.layer:
656
+ with tf.name_scope(layer.name):
657
+ layer.build(None)
658
+
659
+
660
+ class TFBertPooler(keras.layers.Layer):
661
+ def __init__(self, config: BertConfig, **kwargs):
662
+ super().__init__(**kwargs)
663
+
664
+ self.dense = keras.layers.Dense(
665
+ units=config.hidden_size,
666
+ kernel_initializer=get_initializer(config.initializer_range),
667
+ activation="tanh",
668
+ name="dense",
669
+ )
670
+ self.config = config
671
+
672
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
673
+ # We "pool" the model by simply taking the hidden state corresponding
674
+ # to the first token.
675
+ first_token_tensor = hidden_states[:, 0]
676
+ pooled_output = self.dense(inputs=first_token_tensor)
677
+
678
+ return pooled_output
679
+
680
+ def build(self, input_shape=None):
681
+ if self.built:
682
+ return
683
+ self.built = True
684
+ if getattr(self, "dense", None) is not None:
685
+ with tf.name_scope(self.dense.name):
686
+ self.dense.build([None, None, self.config.hidden_size])
687
+
688
+
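An illustrative aside, not part of the upstream file: the "pooling" performed by `TFBertPooler` above is simply slicing out the first ([CLS]) token before the tanh-activated dense layer; a dummy slice makes this concrete.

import tensorflow as tf

batch_size, seq_len, hidden_size = 2, 5, 4
sequence_output = tf.random.normal((batch_size, seq_len, hidden_size))
first_token = sequence_output[:, 0]    # (2, 4): hidden state of [CLS] for each example
# The real layer then applies Dense(hidden_size, activation="tanh") to this slice.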
689
+ class TFBertPredictionHeadTransform(keras.layers.Layer):
690
+ def __init__(self, config: BertConfig, **kwargs):
691
+ super().__init__(**kwargs)
692
+
693
+ self.dense = keras.layers.Dense(
694
+ units=config.hidden_size,
695
+ kernel_initializer=get_initializer(config.initializer_range),
696
+ name="dense",
697
+ )
698
+
699
+ if isinstance(config.hidden_act, str):
700
+ self.transform_act_fn = get_tf_activation(config.hidden_act)
701
+ else:
702
+ self.transform_act_fn = config.hidden_act
703
+
704
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
705
+ self.config = config
706
+
707
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
708
+ hidden_states = self.dense(inputs=hidden_states)
709
+ hidden_states = self.transform_act_fn(hidden_states)
710
+ hidden_states = self.LayerNorm(inputs=hidden_states)
711
+
712
+ return hidden_states
713
+
714
+ def build(self, input_shape=None):
715
+ if self.built:
716
+ return
717
+ self.built = True
718
+ if getattr(self, "dense", None) is not None:
719
+ with tf.name_scope(self.dense.name):
720
+ self.dense.build([None, None, self.config.hidden_size])
721
+ if getattr(self, "LayerNorm", None) is not None:
722
+ with tf.name_scope(self.LayerNorm.name):
723
+ self.LayerNorm.build([None, None, self.config.hidden_size])
724
+
725
+
726
+ class TFBertLMPredictionHead(keras.layers.Layer):
727
+ def __init__(self, config: BertConfig, input_embeddings: keras.layers.Layer, **kwargs):
728
+ super().__init__(**kwargs)
729
+
730
+ self.config = config
731
+ self.hidden_size = config.hidden_size
732
+
733
+ self.transform = TFBertPredictionHeadTransform(config, name="transform")
734
+
735
+ # The output weights are the same as the input embeddings, but there is
736
+ # an output-only bias for each token.
737
+ self.input_embeddings = input_embeddings
738
+
739
+ def build(self, input_shape=None):
740
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
741
+
742
+ if self.built:
743
+ return
744
+ self.built = True
745
+ if getattr(self, "transform", None) is not None:
746
+ with tf.name_scope(self.transform.name):
747
+ self.transform.build(None)
748
+
749
+ def get_output_embeddings(self) -> keras.layers.Layer:
750
+ return self.input_embeddings
751
+
752
+ def set_output_embeddings(self, value: tf.Variable):
753
+ self.input_embeddings.weight = value
754
+ self.input_embeddings.vocab_size = shape_list(value)[0]
755
+
756
+ def get_bias(self) -> Dict[str, tf.Variable]:
757
+ return {"bias": self.bias}
758
+
759
+ def set_bias(self, value: tf.Variable):
760
+ self.bias = value["bias"]
761
+ self.config.vocab_size = shape_list(value["bias"])[0]
762
+
763
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
764
+ hidden_states = self.transform(hidden_states=hidden_states)
765
+ seq_length = shape_list(hidden_states)[1]
766
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
767
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
768
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
769
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
770
+
771
+ return hidden_states
772
+
773
+
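An illustrative aside, not part of the upstream file: the weight tying used by `TFBertLMPredictionHead` above. Because the decoder reuses the input embedding matrix, projecting hidden states back onto the vocabulary is a matmul against the transposed embedding table plus a per-token bias; the shapes below are made up.

import tensorflow as tf

vocab_size, hidden_size, seq_len = 10, 4, 3
embedding_matrix = tf.random.normal((vocab_size, hidden_size))
hidden_states = tf.random.normal((seq_len, hidden_size))
bias = tf.zeros((vocab_size,))

vocab_logits = tf.matmul(hidden_states, embedding_matrix, transpose_b=True) + bias  # (3, 10)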
774
+ class TFBertMLMHead(keras.layers.Layer):
775
+ def __init__(self, config: BertConfig, input_embeddings: keras.layers.Layer, **kwargs):
776
+ super().__init__(**kwargs)
777
+
778
+ self.predictions = TFBertLMPredictionHead(config, input_embeddings, name="predictions")
779
+
780
+ def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
781
+ prediction_scores = self.predictions(hidden_states=sequence_output)
782
+
783
+ return prediction_scores
784
+
785
+ def build(self, input_shape=None):
786
+ if self.built:
787
+ return
788
+ self.built = True
789
+ if getattr(self, "predictions", None) is not None:
790
+ with tf.name_scope(self.predictions.name):
791
+ self.predictions.build(None)
792
+
793
+
794
+ class TFBertNSPHead(keras.layers.Layer):
795
+ def __init__(self, config: BertConfig, **kwargs):
796
+ super().__init__(**kwargs)
797
+
798
+ self.seq_relationship = keras.layers.Dense(
799
+ units=2,
800
+ kernel_initializer=get_initializer(config.initializer_range),
801
+ name="seq_relationship",
802
+ )
803
+ self.config = config
804
+
805
+ def call(self, pooled_output: tf.Tensor) -> tf.Tensor:
806
+ seq_relationship_score = self.seq_relationship(inputs=pooled_output)
807
+
808
+ return seq_relationship_score
809
+
810
+ def build(self, input_shape=None):
811
+ if self.built:
812
+ return
813
+ self.built = True
814
+ if getattr(self, "seq_relationship", None) is not None:
815
+ with tf.name_scope(self.seq_relationship.name):
816
+ self.seq_relationship.build([None, None, self.config.hidden_size])
817
+
818
+
819
+ @keras_serializable
820
+ class TFBertMainLayer(keras.layers.Layer):
821
+ config_class = BertConfig
822
+
823
+ def __init__(self, config: BertConfig, add_pooling_layer: bool = True, **kwargs):
824
+ super().__init__(**kwargs)
825
+
826
+ self.config = config
827
+ self.is_decoder = config.is_decoder
828
+
829
+ self.embeddings = TFBertEmbeddings(config, name="embeddings")
830
+ self.encoder = TFBertEncoder(config, name="encoder")
831
+ self.pooler = TFBertPooler(config, name="pooler") if add_pooling_layer else None
832
+
833
+ def get_input_embeddings(self) -> keras.layers.Layer:
834
+ return self.embeddings
835
+
836
+ def set_input_embeddings(self, value: tf.Variable):
837
+ self.embeddings.weight = value
838
+ self.embeddings.vocab_size = shape_list(value)[0]
839
+
840
+ def _prune_heads(self, heads_to_prune):
841
+ """
842
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
843
+ class PreTrainedModel
844
+ """
845
+ raise NotImplementedError
846
+
847
+ @unpack_inputs
848
+ def call(
849
+ self,
850
+ input_ids: TFModelInputType | None = None,
851
+ attention_mask: np.ndarray | tf.Tensor | None = None,
852
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
853
+ position_ids: np.ndarray | tf.Tensor | None = None,
854
+ head_mask: np.ndarray | tf.Tensor | None = None,
855
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
856
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
857
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
858
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
859
+ use_cache: Optional[bool] = None,
860
+ output_attentions: Optional[bool] = None,
861
+ output_hidden_states: Optional[bool] = None,
862
+ return_dict: Optional[bool] = None,
863
+ training: bool = False,
864
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
865
+ if not self.config.is_decoder:
866
+ use_cache = False
867
+
868
+ if input_ids is not None and inputs_embeds is not None:
869
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
870
+ elif input_ids is not None:
871
+ input_shape = shape_list(input_ids)
872
+ elif inputs_embeds is not None:
873
+ input_shape = shape_list(inputs_embeds)[:-1]
874
+ else:
875
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
876
+
877
+ batch_size, seq_length = input_shape
878
+
879
+ if past_key_values is None:
880
+ past_key_values_length = 0
881
+ past_key_values = [None] * len(self.encoder.layer)
882
+ else:
883
+ past_key_values_length = shape_list(past_key_values[0][0])[-2]
884
+
885
+ if attention_mask is None:
886
+ attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1)
887
+
888
+ if token_type_ids is None:
889
+ token_type_ids = tf.fill(dims=input_shape, value=0)
890
+
891
+ embedding_output = self.embeddings(
892
+ input_ids=input_ids,
893
+ position_ids=position_ids,
894
+ token_type_ids=token_type_ids,
895
+ inputs_embeds=inputs_embeds,
896
+ past_key_values_length=past_key_values_length,
897
+ training=training,
898
+ )
899
+
900
+ # We create a 3D attention mask from a 2D tensor mask.
901
+ # Sizes are [batch_size, 1, 1, to_seq_length]
902
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
903
+ # this attention mask is more simple than the triangular masking of causal attention
904
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
905
+ attention_mask_shape = shape_list(attention_mask)
906
+
907
+ mask_seq_length = seq_length + past_key_values_length
908
+ # Copied from `modeling_tf_t5.py`
909
+ # Provided a padding mask of dimensions [batch_size, mask_seq_length]
910
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
911
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
912
+ if self.is_decoder:
913
+ seq_ids = tf.range(mask_seq_length)
914
+ causal_mask = tf.less_equal(
915
+ tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)),
916
+ seq_ids[None, :, None],
917
+ )
918
+ causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype)
919
+ extended_attention_mask = causal_mask * attention_mask[:, None, :]
920
+ attention_mask_shape = shape_list(extended_attention_mask)
921
+ extended_attention_mask = tf.reshape(
922
+ extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])
923
+ )
924
+ if past_key_values[0] is not None:
925
+ # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length]
926
+ extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :]
927
+ else:
928
+ extended_attention_mask = tf.reshape(
929
+ attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])
930
+ )
931
+
932
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
933
+ # masked positions, this operation will create a tensor which is 0.0 for
934
+ # positions we want to attend and -10000.0 for masked positions.
935
+ # Since we are adding it to the raw scores before the softmax, this is
936
+ # effectively the same as removing these entirely.
937
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
938
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
939
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
940
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
941
+
942
+ # Copied from `modeling_tf_t5.py` with -1e9 -> -10000
943
+ if self.is_decoder and encoder_attention_mask is not None:
944
+ # If a 2D or 3D attention mask is provided for the cross-attention
945
+ # we need to make broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
946
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
947
+ encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype)
948
+ num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
949
+ if num_dims_encoder_attention_mask == 3:
950
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
951
+ if num_dims_encoder_attention_mask == 2:
952
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
953
+
954
+ # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
955
+ # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
956
+ # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask,
957
+ # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2)))
958
+
959
+ encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
960
+ else:
961
+ encoder_extended_attention_mask = None
962
+
963
+ # Prepare head mask if needed
964
+ # 1.0 in head_mask indicate we keep the head
965
+ # attention_probs has shape bsz x n_heads x N x N
966
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
967
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
968
+ if head_mask is not None:
969
+ raise NotImplementedError
970
+ else:
971
+ head_mask = [None] * self.config.num_hidden_layers
972
+
973
+ encoder_outputs = self.encoder(
974
+ hidden_states=embedding_output,
975
+ attention_mask=extended_attention_mask,
976
+ head_mask=head_mask,
977
+ encoder_hidden_states=encoder_hidden_states,
978
+ encoder_attention_mask=encoder_extended_attention_mask,
979
+ past_key_values=past_key_values,
980
+ use_cache=use_cache,
981
+ output_attentions=output_attentions,
982
+ output_hidden_states=output_hidden_states,
983
+ return_dict=return_dict,
984
+ training=training,
985
+ )
986
+
987
+ sequence_output = encoder_outputs[0]
988
+ pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None
989
+
990
+ if not return_dict:
991
+ return (
992
+ sequence_output,
993
+ pooled_output,
994
+ ) + encoder_outputs[1:]
995
+
996
+ return TFBaseModelOutputWithPoolingAndCrossAttentions(
997
+ last_hidden_state=sequence_output,
998
+ pooler_output=pooled_output,
999
+ past_key_values=encoder_outputs.past_key_values,
1000
+ hidden_states=encoder_outputs.hidden_states,
1001
+ attentions=encoder_outputs.attentions,
1002
+ cross_attentions=encoder_outputs.cross_attentions,
1003
+ )
1004
+
1005
+ def build(self, input_shape=None):
1006
+ if self.built:
1007
+ return
1008
+ self.built = True
1009
+ if getattr(self, "embeddings", None) is not None:
1010
+ with tf.name_scope(self.embeddings.name):
1011
+ self.embeddings.build(None)
1012
+ if getattr(self, "encoder", None) is not None:
1013
+ with tf.name_scope(self.encoder.name):
1014
+ self.encoder.build(None)
1015
+ if getattr(self, "pooler", None) is not None:
1016
+ with tf.name_scope(self.pooler.name):
1017
+ self.pooler.build(None)
1018
+
1019
+
1020
+ class TFBertPreTrainedModel(TFPreTrainedModel):
1021
+ """
1022
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1023
+ models.
1024
+ """
1025
+
1026
+ config_class = BertConfig
1027
+ base_model_prefix = "bert"
1028
+
1029
+
1030
+ @dataclass
1031
+ class TFBertForPreTrainingOutput(ModelOutput):
1032
+ """
1033
+ Output type of [`TFBertForPreTraining`].
1034
+
1035
+ Args:
1036
+ prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
1037
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
1038
+ seq_relationship_logits (`tf.Tensor` of shape `(batch_size, 2)`):
1039
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
1040
+ before SoftMax).
1041
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
1042
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
1043
+ `(batch_size, sequence_length, hidden_size)`.
1044
+
1045
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
1046
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
1047
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
1048
+ sequence_length)`.
1049
+
1050
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
1051
+ heads.
1052
+ """
1053
+
1054
+ loss: tf.Tensor | None = None
1055
+ prediction_logits: tf.Tensor = None
1056
+ seq_relationship_logits: tf.Tensor = None
1057
+ hidden_states: Optional[Union[Tuple[tf.Tensor], tf.Tensor]] = None
1058
+ attentions: Optional[Union[Tuple[tf.Tensor], tf.Tensor]] = None
1059
+
1060
+
1061
+ BERT_START_DOCSTRING = r"""
1062
+
1063
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
1064
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1065
+ etc.)
1066
+
1067
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
1068
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
1069
+ behavior.
1070
+
1071
+ <Tip>
1072
+
1073
+ TensorFlow models and layers in `transformers` accept two formats as input:
1074
+
1075
+ - having all inputs as keyword arguments (like PyTorch models), or
1076
+ - having all inputs as a list, tuple or dict in the first positional argument.
1077
+
1078
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
1079
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
1080
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
1081
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
1082
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
1083
+ positional argument:
1084
+
1085
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
1086
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
1087
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
1088
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
1089
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
1090
+
1091
+ Note that when creating models and layers with
1092
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
1093
+ about any of this, as you can just pass inputs like you would to any other Python function!
1094
+
1095
+ </Tip>
1096
+
1097
+ Args:
1098
+ config ([`BertConfig`]): Model configuration class with all the parameters of the model.
1099
+ Initializing with a config file does not load the weights associated with the model, only the
1100
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
1101
+ """
1102
+
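+ # [Editor's sketch, not part of the upstream file] The three input formats described in
+ # BERT_START_DOCSTRING above, using a hypothetical `model` and pre-built tensors:
+ #   model(input_ids)                                                       # single tensor
+ #   model([input_ids, attention_mask])                                     # list, in docstring order
+ #   model({"input_ids": input_ids, "attention_mask": attention_mask})      # dict keyed by input names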
1103
+ BERT_INPUTS_DOCSTRING = r"""
1104
+ Args:
1105
+ input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `({0})`):
1106
+ Indices of input sequence tokens in the vocabulary.
1107
+
1108
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
1109
+ [`PreTrainedTokenizer.encode`] for details.
1110
+
1111
+ [What are input IDs?](../glossary#input-ids)
1112
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1113
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1114
+
1115
+ - 1 for tokens that are **not masked**,
1116
+ - 0 for tokens that are **masked**.
1117
+
1118
+ [What are attention masks?](../glossary#attention-mask)
1119
+ token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1120
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1121
+ 1]`:
1122
+
1123
+ - 0 corresponds to a *sentence A* token,
1124
+ - 1 corresponds to a *sentence B* token.
1125
+
1126
+ [What are token type IDs?](../glossary#token-type-ids)
1127
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1128
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1129
+ config.max_position_embeddings - 1]`.
1130
+
1131
+ [What are position IDs?](../glossary#position-ids)
1132
+ head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1133
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
1134
+
1135
+ - 1 indicates the head is **not masked**,
1136
+ - 0 indicates the head is **masked**.
1137
+
1138
+ inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
1139
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1140
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1141
+ model's internal embedding lookup matrix.
1142
+ output_attentions (`bool`, *optional*):
1143
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1144
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1145
+ config will be used instead.
1146
+ output_hidden_states (`bool`, *optional*):
1147
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1148
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1149
+ used instead.
1150
+ return_dict (`bool`, *optional*):
1151
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1152
+ eager mode, in graph mode the value will always be set to True.
1153
+ training (`bool`, *optional*, defaults to `False`):
1154
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1155
+ behaviors between training and evaluation).
1156
+ """
1157
+
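+ # [Editor's sketch, assuming the standard tokenizer API] One way to produce the inputs documented in
+ # BERT_INPUTS_DOCSTRING above:
+ #   from transformers import AutoTokenizer
+ #   tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
+ #   enc = tokenizer("Hello world", "Second segment", return_tensors="tf")
+ #   # enc["input_ids"], enc["token_type_ids"] and enc["attention_mask"] all have shape
+ #   # (batch_size, sequence_length) and map directly onto the arguments described above.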
1158
+
1159
+ @add_start_docstrings(
1160
+ "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
1161
+ BERT_START_DOCSTRING,
1162
+ )
1163
+ class TFBertModel(TFBertPreTrainedModel):
1164
+ def __init__(self, config: BertConfig, add_pooling_layer: bool = True, *inputs, **kwargs):
1165
+ super().__init__(config, *inputs, **kwargs)
1166
+
1167
+ self.bert = TFBertMainLayer(config, add_pooling_layer, name="bert")
1168
+
1169
+ @unpack_inputs
1170
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1171
+ @add_code_sample_docstrings(
1172
+ checkpoint=_CHECKPOINT_FOR_DOC,
1173
+ output_type=TFBaseModelOutputWithPoolingAndCrossAttentions,
1174
+ config_class=_CONFIG_FOR_DOC,
1175
+ )
1176
+ def call(
1177
+ self,
1178
+ input_ids: TFModelInputType | None = None,
1179
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1180
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1181
+ position_ids: np.ndarray | tf.Tensor | None = None,
1182
+ head_mask: np.ndarray | tf.Tensor | None = None,
1183
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1184
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1185
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1186
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1187
+ use_cache: Optional[bool] = None,
1188
+ output_attentions: Optional[bool] = None,
1189
+ output_hidden_states: Optional[bool] = None,
1190
+ return_dict: Optional[bool] = None,
1191
+ training: Optional[bool] = False,
1192
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
1193
+ r"""
1194
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1195
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1196
+ the model is configured as a decoder.
1197
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1198
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1199
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1200
+
1201
+ - 1 for tokens that are **not masked**,
1202
+ - 0 for tokens that are **masked**.
1203
+
1204
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
1205
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1206
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1207
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1208
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1209
+ use_cache (`bool`, *optional*, defaults to `True`):
1210
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1211
+ `past_key_values`). Set to `False` during training, `True` during generation.
1212
+ """
1213
+ outputs = self.bert(
1214
+ input_ids=input_ids,
1215
+ attention_mask=attention_mask,
1216
+ token_type_ids=token_type_ids,
1217
+ position_ids=position_ids,
1218
+ head_mask=head_mask,
1219
+ inputs_embeds=inputs_embeds,
1220
+ encoder_hidden_states=encoder_hidden_states,
1221
+ encoder_attention_mask=encoder_attention_mask,
1222
+ past_key_values=past_key_values,
1223
+ use_cache=use_cache,
1224
+ output_attentions=output_attentions,
1225
+ output_hidden_states=output_hidden_states,
1226
+ return_dict=return_dict,
1227
+ training=training,
1228
+ )
1229
+ return outputs
1230
+
1231
+ def build(self, input_shape=None):
1232
+ if self.built:
1233
+ return
1234
+ self.built = True
1235
+ if getattr(self, "bert", None) is not None:
1236
+ with tf.name_scope(self.bert.name):
1237
+ self.bert.build(None)
1238
+
1239
+
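+ # [Editor's sketch, illustrative usage of TFBertModel above, not part of the upstream file]
+ #   from transformers import AutoTokenizer, TFBertModel
+ #   tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
+ #   model = TFBertModel.from_pretrained("google-bert/bert-base-uncased")
+ #   inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
+ #   outputs = model(inputs)
+ #   # outputs.last_hidden_state: (batch_size, sequence_length, hidden_size)
+ #   # outputs.pooler_output:     (batch_size, hidden_size)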
1240
+ @add_start_docstrings(
1241
+ """
1242
+ Bert Model with two heads on top as done during the pretraining:
1243
+ a `masked language modeling` head and a `next sentence prediction (classification)` head.
1244
+ """,
1245
+ BERT_START_DOCSTRING,
1246
+ )
1247
+ class TFBertForPreTraining(TFBertPreTrainedModel, TFBertPreTrainingLoss):
1248
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1249
+ _keys_to_ignore_on_load_unexpected = [
1250
+ r"position_ids",
1251
+ r"cls.predictions.decoder.weight",
1252
+ r"cls.predictions.decoder.bias",
1253
+ ]
1254
+
1255
+ def __init__(self, config: BertConfig, *inputs, **kwargs):
1256
+ super().__init__(config, *inputs, **kwargs)
1257
+
1258
+ self.bert = TFBertMainLayer(config, name="bert")
1259
+ self.nsp = TFBertNSPHead(config, name="nsp___cls")
1260
+ self.mlm = TFBertMLMHead(config, input_embeddings=self.bert.embeddings, name="mlm___cls")
1261
+
1262
+ def get_lm_head(self) -> keras.layers.Layer:
1263
+ return self.mlm.predictions
1264
+
1265
+ def get_prefix_bias_name(self) -> str:
1266
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1267
+ return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name
1268
+
1269
+ @unpack_inputs
1270
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1271
+ @replace_return_docstrings(output_type=TFBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1272
+ def call(
1273
+ self,
1274
+ input_ids: TFModelInputType | None = None,
1275
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1276
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1277
+ position_ids: np.ndarray | tf.Tensor | None = None,
1278
+ head_mask: np.ndarray | tf.Tensor | None = None,
1279
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1280
+ output_attentions: Optional[bool] = None,
1281
+ output_hidden_states: Optional[bool] = None,
1282
+ return_dict: Optional[bool] = None,
1283
+ labels: np.ndarray | tf.Tensor | None = None,
1284
+ next_sentence_label: np.ndarray | tf.Tensor | None = None,
1285
+ training: Optional[bool] = False,
1286
+ ) -> Union[TFBertForPreTrainingOutput, Tuple[tf.Tensor]]:
1287
+ r"""
1288
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1289
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1290
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the
1291
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1292
+ next_sentence_label (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1293
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
1294
+ (see `input_ids` docstring). Indices should be in `[0, 1]`:
1295
+
1296
+ - 0 indicates sequence B is a continuation of sequence A,
1297
+ - 1 indicates sequence B is a random sequence.
1298
+ kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
1299
+ Used to hide legacy arguments that have been deprecated.
1300
+
1301
+ Return:
1302
+
1303
+ Examples:
1304
+
1305
+ ```python
1306
+ >>> import tensorflow as tf
1307
+ >>> from transformers import AutoTokenizer, TFBertForPreTraining
1308
+
1309
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
1310
+ >>> model = TFBertForPreTraining.from_pretrained("google-bert/bert-base-uncased")
1311
+ >>> input_ids = tokenizer("Hello, my dog is cute", add_special_tokens=True, return_tensors="tf")
1312
+ >>> # Batch size 1
1313
+
1314
+ >>> outputs = model(input_ids)
1315
+ >>> prediction_logits, seq_relationship_logits = outputs[:2]
1316
+ ```"""
1317
+ outputs = self.bert(
1318
+ input_ids=input_ids,
1319
+ attention_mask=attention_mask,
1320
+ token_type_ids=token_type_ids,
1321
+ position_ids=position_ids,
1322
+ head_mask=head_mask,
1323
+ inputs_embeds=inputs_embeds,
1324
+ output_attentions=output_attentions,
1325
+ output_hidden_states=output_hidden_states,
1326
+ return_dict=return_dict,
1327
+ training=training,
1328
+ )
1329
+ sequence_output, pooled_output = outputs[:2]
1330
+ prediction_scores = self.mlm(sequence_output=sequence_output, training=training)
1331
+ seq_relationship_score = self.nsp(pooled_output=pooled_output)
1332
+ total_loss = None
1333
+
1334
+ if labels is not None and next_sentence_label is not None:
1335
+ d_labels = {"labels": labels}
1336
+ d_labels["next_sentence_label"] = next_sentence_label
1337
+ total_loss = self.hf_compute_loss(labels=d_labels, logits=(prediction_scores, seq_relationship_score))
1338
+
1339
+ if not return_dict:
1340
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
1341
+ return ((total_loss,) + output) if total_loss is not None else output
1342
+
1343
+ return TFBertForPreTrainingOutput(
1344
+ loss=total_loss,
1345
+ prediction_logits=prediction_scores,
1346
+ seq_relationship_logits=seq_relationship_score,
1347
+ hidden_states=outputs.hidden_states,
1348
+ attentions=outputs.attentions,
1349
+ )
1350
+
1351
+ def build(self, input_shape=None):
1352
+ if self.built:
1353
+ return
1354
+ self.built = True
1355
+ if getattr(self, "bert", None) is not None:
1356
+ with tf.name_scope(self.bert.name):
1357
+ self.bert.build(None)
1358
+ if getattr(self, "nsp", None) is not None:
1359
+ with tf.name_scope(self.nsp.name):
1360
+ self.nsp.build(None)
1361
+ if getattr(self, "mlm", None) is not None:
1362
+ with tf.name_scope(self.mlm.name):
1363
+ self.mlm.build(None)
1364
+
1365
+
1366
+ @add_start_docstrings("""Bert Model with a `language modeling` head on top.""", BERT_START_DOCSTRING)
1367
+ class TFBertForMaskedLM(TFBertPreTrainedModel, TFMaskedLanguageModelingLoss):
1368
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1369
+ _keys_to_ignore_on_load_unexpected = [
1370
+ r"pooler",
1371
+ r"cls.seq_relationship",
1372
+ r"cls.predictions.decoder.weight",
1373
+ r"nsp___cls",
1374
+ ]
1375
+
1376
+ def __init__(self, config: BertConfig, *inputs, **kwargs):
1377
+ super().__init__(config, *inputs, **kwargs)
1378
+
1379
+ if config.is_decoder:
1380
+ logger.warning(
1381
+ "If you want to use `TFBertForMaskedLM` make sure `config.is_decoder=False` for "
1382
+ "bi-directional self-attention."
1383
+ )
1384
+
1385
+ self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert")
1386
+ self.mlm = TFBertMLMHead(config, input_embeddings=self.bert.embeddings, name="mlm___cls")
1387
+
1388
+ def get_lm_head(self) -> keras.layers.Layer:
1389
+ return self.mlm.predictions
1390
+
1391
+ def get_prefix_bias_name(self) -> str:
1392
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1393
+ return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name
1394
+
1395
+ @unpack_inputs
1396
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1397
+ @add_code_sample_docstrings(
1398
+ checkpoint=_CHECKPOINT_FOR_DOC,
1399
+ output_type=TFMaskedLMOutput,
1400
+ config_class=_CONFIG_FOR_DOC,
1401
+ expected_output="'paris'",
1402
+ expected_loss=0.88,
1403
+ )
1404
+ def call(
1405
+ self,
1406
+ input_ids: TFModelInputType | None = None,
1407
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1408
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1409
+ position_ids: np.ndarray | tf.Tensor | None = None,
1410
+ head_mask: np.ndarray | tf.Tensor | None = None,
1411
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1412
+ output_attentions: Optional[bool] = None,
1413
+ output_hidden_states: Optional[bool] = None,
1414
+ return_dict: Optional[bool] = None,
1415
+ labels: np.ndarray | tf.Tensor | None = None,
1416
+ training: Optional[bool] = False,
1417
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
1418
+ r"""
1419
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1420
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1421
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the
1422
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1423
+ """
1424
+ outputs = self.bert(
1425
+ input_ids=input_ids,
1426
+ attention_mask=attention_mask,
1427
+ token_type_ids=token_type_ids,
1428
+ position_ids=position_ids,
1429
+ head_mask=head_mask,
1430
+ inputs_embeds=inputs_embeds,
1431
+ output_attentions=output_attentions,
1432
+ output_hidden_states=output_hidden_states,
1433
+ return_dict=return_dict,
1434
+ training=training,
1435
+ )
1436
+ sequence_output = outputs[0]
1437
+ prediction_scores = self.mlm(sequence_output=sequence_output, training=training)
1438
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)
1439
+
1440
+ if not return_dict:
1441
+ output = (prediction_scores,) + outputs[2:]
1442
+ return ((loss,) + output) if loss is not None else output
1443
+
1444
+ return TFMaskedLMOutput(
1445
+ loss=loss,
1446
+ logits=prediction_scores,
1447
+ hidden_states=outputs.hidden_states,
1448
+ attentions=outputs.attentions,
1449
+ )
1450
+
1451
+ def build(self, input_shape=None):
1452
+ if self.built:
1453
+ return
1454
+ self.built = True
1455
+ if getattr(self, "bert", None) is not None:
1456
+ with tf.name_scope(self.bert.name):
1457
+ self.bert.build(None)
1458
+ if getattr(self, "mlm", None) is not None:
1459
+ with tf.name_scope(self.mlm.name):
1460
+ self.mlm.build(None)
1461
+
1462
+
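+ # [Editor's sketch, illustrative usage of TFBertForMaskedLM above, not part of the upstream file]
+ #   from transformers import AutoTokenizer, TFBertForMaskedLM
+ #   tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
+ #   model = TFBertForMaskedLM.from_pretrained("google-bert/bert-base-uncased")
+ #   inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
+ #   logits = model(**inputs).logits          # (batch_size, sequence_length, vocab_size)
+ #   # the highest-scoring token at the [MASK] position is the model's fill-in (expected: "paris")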
1463
+ class TFBertLMHeadModel(TFBertPreTrainedModel, TFCausalLanguageModelingLoss):
1464
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1465
+ _keys_to_ignore_on_load_unexpected = [
1466
+ r"pooler",
1467
+ r"cls.seq_relationship",
1468
+ r"cls.predictions.decoder.weight",
1469
+ r"nsp___cls",
1470
+ ]
1471
+
1472
+ def __init__(self, config: BertConfig, *inputs, **kwargs):
1473
+ super().__init__(config, *inputs, **kwargs)
1474
+
1475
+ if not config.is_decoder:
1476
+ logger.warning("If you want to use `TFBertLMHeadModel` as a standalone, add `is_decoder=True.`")
1477
+
1478
+ self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert")
1479
+ self.mlm = TFBertMLMHead(config, input_embeddings=self.bert.embeddings, name="mlm___cls")
1480
+
1481
+ def get_lm_head(self) -> keras.layers.Layer:
1482
+ return self.mlm.predictions
1483
+
1484
+ def get_prefix_bias_name(self) -> str:
1485
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1486
+ return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name
1487
+
1488
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
1489
+ input_shape = input_ids.shape
1490
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1491
+ if attention_mask is None:
1492
+ attention_mask = tf.ones(input_shape)
1493
+
1494
+ # cut decoder_input_ids if past is used
1495
+ if past_key_values is not None:
1496
+ input_ids = input_ids[:, -1:]
1497
+
1498
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
1499
+
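+ # [Editor's note, hypothetical shapes] Effect of the cache handling above during generation:
+ #   step 1:  input_ids (batch, prompt_len), past_key_values=None
+ #   step 2+: input_ids (batch, 1) only, with past_key_values holding the cached key/value states,
+ #            so earlier positions are not re-encoded on every step.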
1500
+ @unpack_inputs
1501
+ @add_code_sample_docstrings(
1502
+ checkpoint=_CHECKPOINT_FOR_DOC,
1503
+ output_type=TFCausalLMOutputWithCrossAttentions,
1504
+ config_class=_CONFIG_FOR_DOC,
1505
+ )
1506
+ def call(
1507
+ self,
1508
+ input_ids: TFModelInputType | None = None,
1509
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1510
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1511
+ position_ids: np.ndarray | tf.Tensor | None = None,
1512
+ head_mask: np.ndarray | tf.Tensor | None = None,
1513
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1514
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1515
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1516
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1517
+ use_cache: Optional[bool] = None,
1518
+ output_attentions: Optional[bool] = None,
1519
+ output_hidden_states: Optional[bool] = None,
1520
+ return_dict: Optional[bool] = None,
1521
+ labels: np.ndarray | tf.Tensor | None = None,
1522
+ training: Optional[bool] = False,
1523
+ **kwargs,
1524
+ ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]:
1525
+ r"""
1526
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1527
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1528
+ the model is configured as a decoder.
1529
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1530
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1531
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1532
+
1533
+ - 1 for tokens that are **not masked**,
1534
+ - 0 for tokens that are **masked**.
1535
+
1536
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
1537
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1538
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1539
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1540
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1541
+ use_cache (`bool`, *optional*, defaults to `True`):
1542
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1543
+ `past_key_values`). Set to `False` during training, `True` during generation.
1544
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1545
+ Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
1546
+ config.vocab_size - 1]`.
1547
+ """
1548
+ outputs = self.bert(
1549
+ input_ids=input_ids,
1550
+ attention_mask=attention_mask,
1551
+ token_type_ids=token_type_ids,
1552
+ position_ids=position_ids,
1553
+ head_mask=head_mask,
1554
+ inputs_embeds=inputs_embeds,
1555
+ encoder_hidden_states=encoder_hidden_states,
1556
+ encoder_attention_mask=encoder_attention_mask,
1557
+ past_key_values=past_key_values,
1558
+ use_cache=use_cache,
1559
+ output_attentions=output_attentions,
1560
+ output_hidden_states=output_hidden_states,
1561
+ return_dict=return_dict,
1562
+ training=training,
1563
+ )
1564
+ sequence_output = outputs[0]
1565
+ logits = self.mlm(sequence_output=sequence_output, training=training)
1566
+ loss = None
1567
+
1568
+ if labels is not None:
1569
+ # shift labels to the left and cut last logit token
1570
+ shifted_logits = logits[:, :-1]
1571
+ labels = labels[:, 1:]
1572
+ loss = self.hf_compute_loss(labels=labels, logits=shifted_logits)
1573
+
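+ # [Editor's note, toy illustration of the shift above]
+ #   labels         = [t1, t2, t3, t4]  ->  labels[:, 1:] = [t2, t3, t4]
+ #   logits[:, :-1] = scores produced at positions t1..t3
+ # i.e. every position is trained to predict the *next* token (the standard causal LM objective).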
1574
+ if not return_dict:
1575
+ output = (logits,) + outputs[2:]
1576
+ return ((loss,) + output) if loss is not None else output
1577
+
1578
+ return TFCausalLMOutputWithCrossAttentions(
1579
+ loss=loss,
1580
+ logits=logits,
1581
+ past_key_values=outputs.past_key_values,
1582
+ hidden_states=outputs.hidden_states,
1583
+ attentions=outputs.attentions,
1584
+ cross_attentions=outputs.cross_attentions,
1585
+ )
1586
+
1587
+ def build(self, input_shape=None):
1588
+ if self.built:
1589
+ return
1590
+ self.built = True
1591
+ if getattr(self, "bert", None) is not None:
1592
+ with tf.name_scope(self.bert.name):
1593
+ self.bert.build(None)
1594
+ if getattr(self, "mlm", None) is not None:
1595
+ with tf.name_scope(self.mlm.name):
1596
+ self.mlm.build(None)
1597
+
1598
+
1599
+ @add_start_docstrings(
1600
+ """Bert Model with a `next sentence prediction (classification)` head on top.""",
1601
+ BERT_START_DOCSTRING,
1602
+ )
1603
+ class TFBertForNextSentencePrediction(TFBertPreTrainedModel, TFNextSentencePredictionLoss):
1604
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1605
+ _keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"cls.predictions"]
1606
+
1607
+ def __init__(self, config: BertConfig, *inputs, **kwargs):
1608
+ super().__init__(config, *inputs, **kwargs)
1609
+
1610
+ self.bert = TFBertMainLayer(config, name="bert")
1611
+ self.nsp = TFBertNSPHead(config, name="nsp___cls")
1612
+
1613
+ @unpack_inputs
1614
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1615
+ @replace_return_docstrings(output_type=TFNextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
1616
+ def call(
1617
+ self,
1618
+ input_ids: TFModelInputType | None = None,
1619
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1620
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1621
+ position_ids: np.ndarray | tf.Tensor | None = None,
1622
+ head_mask: np.ndarray | tf.Tensor | None = None,
1623
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1624
+ output_attentions: Optional[bool] = None,
1625
+ output_hidden_states: Optional[bool] = None,
1626
+ return_dict: Optional[bool] = None,
1627
+ next_sentence_label: np.ndarray | tf.Tensor | None = None,
1628
+ training: Optional[bool] = False,
1629
+ ) -> Union[TFNextSentencePredictorOutput, Tuple[tf.Tensor]]:
1630
+ r"""
1631
+ Return:
1632
+
1633
+ Examples:
1634
+
1635
+ ```python
1636
+ >>> import tensorflow as tf
1637
+ >>> from transformers import AutoTokenizer, TFBertForNextSentencePrediction
1638
+
1639
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
1640
+ >>> model = TFBertForNextSentencePrediction.from_pretrained("google-bert/bert-base-uncased")
1641
+
1642
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1643
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
1644
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="tf")
1645
+
1646
+ >>> logits = model(encoding["input_ids"], token_type_ids=encoding["token_type_ids"])[0]
1647
+ >>> assert logits[0][0] < logits[0][1] # the next sentence was random
1648
+ ```"""
1649
+ outputs = self.bert(
1650
+ input_ids=input_ids,
1651
+ attention_mask=attention_mask,
1652
+ token_type_ids=token_type_ids,
1653
+ position_ids=position_ids,
1654
+ head_mask=head_mask,
1655
+ inputs_embeds=inputs_embeds,
1656
+ output_attentions=output_attentions,
1657
+ output_hidden_states=output_hidden_states,
1658
+ return_dict=return_dict,
1659
+ training=training,
1660
+ )
1661
+ pooled_output = outputs[1]
1662
+ seq_relationship_scores = self.nsp(pooled_output=pooled_output)
1663
+ next_sentence_loss = (
1664
+ None
1665
+ if next_sentence_label is None
1666
+ else self.hf_compute_loss(labels=next_sentence_label, logits=seq_relationship_scores)
1667
+ )
1668
+
1669
+ if not return_dict:
1670
+ output = (seq_relationship_scores,) + outputs[2:]
1671
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
1672
+
1673
+ return TFNextSentencePredictorOutput(
1674
+ loss=next_sentence_loss,
1675
+ logits=seq_relationship_scores,
1676
+ hidden_states=outputs.hidden_states,
1677
+ attentions=outputs.attentions,
1678
+ )
1679
+
1680
+ def build(self, input_shape=None):
1681
+ if self.built:
1682
+ return
1683
+ self.built = True
1684
+ if getattr(self, "bert", None) is not None:
1685
+ with tf.name_scope(self.bert.name):
1686
+ self.bert.build(None)
1687
+ if getattr(self, "nsp", None) is not None:
1688
+ with tf.name_scope(self.nsp.name):
1689
+ self.nsp.build(None)
1690
+
1691
+
1692
+ @add_start_docstrings(
1693
+ """
1694
+ Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1695
+ output) e.g. for GLUE tasks.
1696
+ """,
1697
+ BERT_START_DOCSTRING,
1698
+ )
1699
+ class TFBertForSequenceClassification(TFBertPreTrainedModel, TFSequenceClassificationLoss):
1700
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1701
+ _keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"nsp___cls", r"cls.predictions", r"cls.seq_relationship"]
1702
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1703
+
1704
+ def __init__(self, config: BertConfig, *inputs, **kwargs):
1705
+ super().__init__(config, *inputs, **kwargs)
1706
+
1707
+ self.num_labels = config.num_labels
1708
+
1709
+ self.bert = TFBertMainLayer(config, name="bert")
1710
+ classifier_dropout = (
1711
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1712
+ )
1713
+ self.dropout = keras.layers.Dropout(rate=classifier_dropout)
1714
+ self.classifier = keras.layers.Dense(
1715
+ units=config.num_labels,
1716
+ kernel_initializer=get_initializer(config.initializer_range),
1717
+ name="classifier",
1718
+ )
1719
+ self.config = config
1720
+
1721
+ @unpack_inputs
1722
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1723
+ @add_code_sample_docstrings(
1724
+ checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION,
1725
+ output_type=TFSequenceClassifierOutput,
1726
+ config_class=_CONFIG_FOR_DOC,
1727
+ expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
1728
+ expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
1729
+ )
1730
+ def call(
1731
+ self,
1732
+ input_ids: TFModelInputType | None = None,
1733
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1734
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1735
+ position_ids: np.ndarray | tf.Tensor | None = None,
1736
+ head_mask: np.ndarray | tf.Tensor | None = None,
1737
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1738
+ output_attentions: Optional[bool] = None,
1739
+ output_hidden_states: Optional[bool] = None,
1740
+ return_dict: Optional[bool] = None,
1741
+ labels: np.ndarray | tf.Tensor | None = None,
1742
+ training: Optional[bool] = False,
1743
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
1744
+ r"""
1745
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1746
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1747
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1748
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1749
+ """
1750
+ outputs = self.bert(
1751
+ input_ids=input_ids,
1752
+ attention_mask=attention_mask,
1753
+ token_type_ids=token_type_ids,
1754
+ position_ids=position_ids,
1755
+ head_mask=head_mask,
1756
+ inputs_embeds=inputs_embeds,
1757
+ output_attentions=output_attentions,
1758
+ output_hidden_states=output_hidden_states,
1759
+ return_dict=return_dict,
1760
+ training=training,
1761
+ )
1762
+ pooled_output = outputs[1]
1763
+ pooled_output = self.dropout(inputs=pooled_output, training=training)
1764
+ logits = self.classifier(inputs=pooled_output)
1765
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1766
+
1767
+ if not return_dict:
1768
+ output = (logits,) + outputs[2:]
1769
+ return ((loss,) + output) if loss is not None else output
1770
+
1771
+ return TFSequenceClassifierOutput(
1772
+ loss=loss,
1773
+ logits=logits,
1774
+ hidden_states=outputs.hidden_states,
1775
+ attentions=outputs.attentions,
1776
+ )
1777
+
1778
+ def build(self, input_shape=None):
1779
+ if self.built:
1780
+ return
1781
+ self.built = True
1782
+ if getattr(self, "bert", None) is not None:
1783
+ with tf.name_scope(self.bert.name):
1784
+ self.bert.build(None)
1785
+ if getattr(self, "classifier", None) is not None:
1786
+ with tf.name_scope(self.classifier.name):
1787
+ self.classifier.build([None, None, self.config.hidden_size])
1788
+
1789
+
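+ # [Editor's sketch, illustrative usage of TFBertForSequenceClassification above; `num_labels=2` is an assumption]
+ #   from transformers import AutoTokenizer, TFBertForSequenceClassification
+ #   tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
+ #   model = TFBertForSequenceClassification.from_pretrained("google-bert/bert-base-uncased", num_labels=2)
+ #   inputs = tokenizer("A short example sentence.", return_tensors="tf")
+ #   logits = model(**inputs).logits          # (batch_size, num_labels)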
1790
+ @add_start_docstrings(
1791
+ """
1792
+ Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1793
+ softmax) e.g. for RocStories/SWAG tasks.
1794
+ """,
1795
+ BERT_START_DOCSTRING,
1796
+ )
1797
+ class TFBertForMultipleChoice(TFBertPreTrainedModel, TFMultipleChoiceLoss):
1798
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1799
+ _keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"nsp___cls", r"cls.predictions", r"cls.seq_relationship"]
1800
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1801
+
1802
+ def __init__(self, config: BertConfig, *inputs, **kwargs):
1803
+ super().__init__(config, *inputs, **kwargs)
1804
+
1805
+ self.bert = TFBertMainLayer(config, name="bert")
1806
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
1807
+ self.classifier = keras.layers.Dense(
1808
+ units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1809
+ )
1810
+ self.config = config
1811
+
1812
+ @unpack_inputs
1813
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1814
+ @add_code_sample_docstrings(
1815
+ checkpoint=_CHECKPOINT_FOR_DOC,
1816
+ output_type=TFMultipleChoiceModelOutput,
1817
+ config_class=_CONFIG_FOR_DOC,
1818
+ )
1819
+ def call(
1820
+ self,
1821
+ input_ids: TFModelInputType | None = None,
1822
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1823
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1824
+ position_ids: np.ndarray | tf.Tensor | None = None,
1825
+ head_mask: np.ndarray | tf.Tensor | None = None,
1826
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1827
+ output_attentions: Optional[bool] = None,
1828
+ output_hidden_states: Optional[bool] = None,
1829
+ return_dict: Optional[bool] = None,
1830
+ labels: np.ndarray | tf.Tensor | None = None,
1831
+ training: Optional[bool] = False,
1832
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
1833
+ r"""
1834
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1835
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
1836
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
1837
+ """
1838
+ if input_ids is not None:
1839
+ num_choices = shape_list(input_ids)[1]
1840
+ seq_length = shape_list(input_ids)[2]
1841
+ else:
1842
+ num_choices = shape_list(inputs_embeds)[1]
1843
+ seq_length = shape_list(inputs_embeds)[2]
1844
+
1845
+ flat_input_ids = tf.reshape(tensor=input_ids, shape=(-1, seq_length)) if input_ids is not None else None
1846
+ flat_attention_mask = (
1847
+ tf.reshape(tensor=attention_mask, shape=(-1, seq_length)) if attention_mask is not None else None
1848
+ )
1849
+ flat_token_type_ids = (
1850
+ tf.reshape(tensor=token_type_ids, shape=(-1, seq_length)) if token_type_ids is not None else None
1851
+ )
1852
+ flat_position_ids = (
1853
+ tf.reshape(tensor=position_ids, shape=(-1, seq_length)) if position_ids is not None else None
1854
+ )
1855
+ flat_inputs_embeds = (
1856
+ tf.reshape(tensor=inputs_embeds, shape=(-1, seq_length, shape_list(inputs_embeds)[3]))
1857
+ if inputs_embeds is not None
1858
+ else None
1859
+ )
1860
+ outputs = self.bert(
1861
+ input_ids=flat_input_ids,
1862
+ attention_mask=flat_attention_mask,
1863
+ token_type_ids=flat_token_type_ids,
1864
+ position_ids=flat_position_ids,
1865
+ head_mask=head_mask,
1866
+ inputs_embeds=flat_inputs_embeds,
1867
+ output_attentions=output_attentions,
1868
+ output_hidden_states=output_hidden_states,
1869
+ return_dict=return_dict,
1870
+ training=training,
1871
+ )
1872
+ pooled_output = outputs[1]
1873
+ pooled_output = self.dropout(inputs=pooled_output, training=training)
1874
+ logits = self.classifier(inputs=pooled_output)
1875
+ reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))
1876
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=reshaped_logits)
1877
+
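+ # [Editor's note, shape walkthrough for the flattening above; the example sizes are hypothetical]
+ #   input_ids        (batch_size, num_choices, seq_length)   e.g. (2, 4, 16)
+ #   flat_input_ids   (batch_size * num_choices, seq_length)  e.g. (8, 16)
+ #   logits           (batch_size * num_choices, 1)
+ #   reshaped_logits  (batch_size, num_choices)               e.g. (2, 4), one score per candidate choice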
1878
+ if not return_dict:
1879
+ output = (reshaped_logits,) + outputs[2:]
1880
+ return ((loss,) + output) if loss is not None else output
1881
+
1882
+ return TFMultipleChoiceModelOutput(
1883
+ loss=loss,
1884
+ logits=reshaped_logits,
1885
+ hidden_states=outputs.hidden_states,
1886
+ attentions=outputs.attentions,
1887
+ )
1888
+
1889
+ def build(self, input_shape=None):
1890
+ if self.built:
1891
+ return
1892
+ self.built = True
1893
+ if getattr(self, "bert", None) is not None:
1894
+ with tf.name_scope(self.bert.name):
1895
+ self.bert.build(None)
1896
+ if getattr(self, "classifier", None) is not None:
1897
+ with tf.name_scope(self.classifier.name):
1898
+ self.classifier.build([None, None, self.config.hidden_size])
1899
+
1900
+
1901
+ @add_start_docstrings(
1902
+ """
1903
+ Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1904
+ Named-Entity-Recognition (NER) tasks.
1905
+ """,
1906
+ BERT_START_DOCSTRING,
1907
+ )
1908
+ class TFBertForTokenClassification(TFBertPreTrainedModel, TFTokenClassificationLoss):
1909
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1910
+ _keys_to_ignore_on_load_unexpected = [
1911
+ r"pooler",
1912
+ r"mlm___cls",
1913
+ r"nsp___cls",
1914
+ r"cls.predictions",
1915
+ r"cls.seq_relationship",
1916
+ ]
1917
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1918
+
1919
+ def __init__(self, config: BertConfig, *inputs, **kwargs):
1920
+ super().__init__(config, *inputs, **kwargs)
1921
+
1922
+ self.num_labels = config.num_labels
1923
+
1924
+ self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert")
1925
+ classifier_dropout = (
1926
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1927
+ )
1928
+ self.dropout = keras.layers.Dropout(rate=classifier_dropout)
1929
+ self.classifier = keras.layers.Dense(
1930
+ units=config.num_labels,
1931
+ kernel_initializer=get_initializer(config.initializer_range),
1932
+ name="classifier",
1933
+ )
1934
+ self.config = config
1935
+
1936
+ @unpack_inputs
1937
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1938
+ @add_code_sample_docstrings(
1939
+ checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION,
1940
+ output_type=TFTokenClassifierOutput,
1941
+ config_class=_CONFIG_FOR_DOC,
1942
+ expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT,
1943
+ expected_loss=_TOKEN_CLASS_EXPECTED_LOSS,
1944
+ )
1945
+ def call(
1946
+ self,
1947
+ input_ids: TFModelInputType | None = None,
1948
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1949
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1950
+ position_ids: np.ndarray | tf.Tensor | None = None,
1951
+ head_mask: np.ndarray | tf.Tensor | None = None,
1952
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1953
+ output_attentions: Optional[bool] = None,
1954
+ output_hidden_states: Optional[bool] = None,
1955
+ return_dict: Optional[bool] = None,
1956
+ labels: np.ndarray | tf.Tensor | None = None,
1957
+ training: Optional[bool] = False,
1958
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1959
+ r"""
1960
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1961
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1962
+ """
1963
+ outputs = self.bert(
1964
+ input_ids=input_ids,
1965
+ attention_mask=attention_mask,
1966
+ token_type_ids=token_type_ids,
1967
+ position_ids=position_ids,
1968
+ head_mask=head_mask,
1969
+ inputs_embeds=inputs_embeds,
1970
+ output_attentions=output_attentions,
1971
+ output_hidden_states=output_hidden_states,
1972
+ return_dict=return_dict,
1973
+ training=training,
1974
+ )
1975
+ sequence_output = outputs[0]
1976
+ sequence_output = self.dropout(inputs=sequence_output, training=training)
1977
+ logits = self.classifier(inputs=sequence_output)
1978
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1979
+
1980
+ if not return_dict:
1981
+ output = (logits,) + outputs[2:]
1982
+ return ((loss,) + output) if loss is not None else output
1983
+
1984
+ return TFTokenClassifierOutput(
1985
+ loss=loss,
1986
+ logits=logits,
1987
+ hidden_states=outputs.hidden_states,
1988
+ attentions=outputs.attentions,
1989
+ )
1990
+
1991
+ def build(self, input_shape=None):
1992
+ if self.built:
1993
+ return
1994
+ self.built = True
1995
+ if getattr(self, "bert", None) is not None:
1996
+ with tf.name_scope(self.bert.name):
1997
+ self.bert.build(None)
1998
+ if getattr(self, "classifier", None) is not None:
1999
+ with tf.name_scope(self.classifier.name):
2000
+ self.classifier.build([None, None, self.config.hidden_size])
2001
+
2002
+
2003
+ @add_start_docstrings(
2004
+ """
2005
+ Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
2006
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
2007
+ """,
2008
+ BERT_START_DOCSTRING,
2009
+ )
2010
+ class TFBertForQuestionAnswering(TFBertPreTrainedModel, TFQuestionAnsweringLoss):
2011
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
2012
+ _keys_to_ignore_on_load_unexpected = [
2013
+ r"pooler",
2014
+ r"mlm___cls",
2015
+ r"nsp___cls",
2016
+ r"cls.predictions",
2017
+ r"cls.seq_relationship",
2018
+ ]
2019
+
2020
+ def __init__(self, config: BertConfig, *inputs, **kwargs):
2021
+ super().__init__(config, *inputs, **kwargs)
2022
+
2023
+ self.num_labels = config.num_labels
2024
+
2025
+ self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert")
2026
+ self.qa_outputs = keras.layers.Dense(
2027
+ units=config.num_labels,
2028
+ kernel_initializer=get_initializer(config.initializer_range),
2029
+ name="qa_outputs",
2030
+ )
2031
+ self.config = config
2032
+
2033
+ @unpack_inputs
2034
+ @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
2035
+ @add_code_sample_docstrings(
2036
+ checkpoint=_CHECKPOINT_FOR_QA,
2037
+ output_type=TFQuestionAnsweringModelOutput,
2038
+ config_class=_CONFIG_FOR_DOC,
2039
+ qa_target_start_index=_QA_TARGET_START_INDEX,
2040
+ qa_target_end_index=_QA_TARGET_END_INDEX,
2041
+ expected_output=_QA_EXPECTED_OUTPUT,
2042
+ expected_loss=_QA_EXPECTED_LOSS,
2043
+ )
2044
+ def call(
2045
+ self,
2046
+ input_ids: TFModelInputType | None = None,
2047
+ attention_mask: np.ndarray | tf.Tensor | None = None,
2048
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
2049
+ position_ids: np.ndarray | tf.Tensor | None = None,
2050
+ head_mask: np.ndarray | tf.Tensor | None = None,
2051
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
2052
+ output_attentions: Optional[bool] = None,
2053
+ output_hidden_states: Optional[bool] = None,
2054
+ return_dict: Optional[bool] = None,
2055
+ start_positions: np.ndarray | tf.Tensor | None = None,
2056
+ end_positions: np.ndarray | tf.Tensor | None = None,
2057
+ training: Optional[bool] = False,
2058
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
2059
+ r"""
2060
+ start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
2061
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
2062
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
2063
+ are not taken into account for computing the loss.
2064
+ end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
2065
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
2066
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
2067
+ are not taken into account for computing the loss.
2068
+ """
2069
+ outputs = self.bert(
2070
+ input_ids=input_ids,
2071
+ attention_mask=attention_mask,
2072
+ token_type_ids=token_type_ids,
2073
+ position_ids=position_ids,
2074
+ head_mask=head_mask,
2075
+ inputs_embeds=inputs_embeds,
2076
+ output_attentions=output_attentions,
2077
+ output_hidden_states=output_hidden_states,
2078
+ return_dict=return_dict,
2079
+ training=training,
2080
+ )
2081
+ sequence_output = outputs[0]
2082
+ logits = self.qa_outputs(inputs=sequence_output)
2083
+ start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
2084
+ start_logits = tf.squeeze(input=start_logits, axis=-1)
2085
+ end_logits = tf.squeeze(input=end_logits, axis=-1)
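+ # [Editor's note, illustration of how the two logit tensors above are typically consumed]
+ #   start_logits, end_logits: each (batch_size, sequence_length)
+ #   answer_start = int(tf.math.argmax(start_logits, axis=-1)[0])
+ #   answer_end   = int(tf.math.argmax(end_logits, axis=-1)[0])
+ #   answer_ids   = input_ids[0, answer_start : answer_end + 1]   # decode with the tokenizer to get the text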
2086
+ loss = None
2087
+
2088
+ if start_positions is not None and end_positions is not None:
2089
+ labels = {"start_position": start_positions}
2090
+ labels["end_position"] = end_positions
2091
+ loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))
2092
+
2093
+ if not return_dict:
2094
+ output = (start_logits, end_logits) + outputs[2:]
2095
+ return ((loss,) + output) if loss is not None else output
2096
+
2097
+ return TFQuestionAnsweringModelOutput(
2098
+ loss=loss,
2099
+ start_logits=start_logits,
2100
+ end_logits=end_logits,
2101
+ hidden_states=outputs.hidden_states,
2102
+ attentions=outputs.attentions,
2103
+ )
2104
+
2105
+ def build(self, input_shape=None):
2106
+ if self.built:
2107
+ return
2108
+ self.built = True
2109
+ if getattr(self, "bert", None) is not None:
2110
+ with tf.name_scope(self.bert.name):
2111
+ self.bert.build(None)
2112
+ if getattr(self, "qa_outputs", None) is not None:
2113
+ with tf.name_scope(self.qa_outputs.name):
2114
+ self.qa_outputs.build([None, None, self.config.hidden_size])
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/tokenization_bert_fast.py ADDED
@@ -0,0 +1,172 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Fast Tokenization classes for Bert."""
16
+
17
+ import json
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import normalizers
21
+
22
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
23
+ from ...utils import logging
24
+ from .tokenization_bert import BertTokenizer
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
30
+
31
+
32
+ class BertTokenizerFast(PreTrainedTokenizerFast):
33
+ r"""
34
+ Construct a "fast" BERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
35
+
36
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
37
+ refer to this superclass for more information regarding those methods.
38
+
39
+ Args:
40
+ vocab_file (`str`):
41
+ File containing the vocabulary.
42
+ do_lower_case (`bool`, *optional*, defaults to `True`):
43
+ Whether or not to lowercase the input when tokenizing.
44
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
45
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
46
+ token instead.
47
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
48
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
49
+ sequence classification or for a text and a question for question answering. It is also used as the last
50
+ token of a sequence built with special tokens.
51
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
52
+ The token used for padding, for example when batching sequences of different lengths.
53
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
54
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
55
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
56
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
57
+ The token used for masking values. This is the token used when training this model with masked language
58
+ modeling. This is the token which the model will try to predict.
59
+ clean_text (`bool`, *optional*, defaults to `True`):
60
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
61
+ whitespaces by the classic one.
62
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
63
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
64
+ issue](https://github.com/huggingface/transformers/issues/328)).
65
+ strip_accents (`bool`, *optional*):
66
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
67
+ value for `lowercase` (as in the original BERT).
68
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
69
+ The prefix for subwords.
70
+ """
71
+
72
+ vocab_files_names = VOCAB_FILES_NAMES
73
+ slow_tokenizer_class = BertTokenizer
74
+
75
+ def __init__(
76
+ self,
77
+ vocab_file=None,
78
+ tokenizer_file=None,
79
+ do_lower_case=True,
80
+ unk_token="[UNK]",
81
+ sep_token="[SEP]",
82
+ pad_token="[PAD]",
83
+ cls_token="[CLS]",
84
+ mask_token="[MASK]",
85
+ tokenize_chinese_chars=True,
86
+ strip_accents=None,
87
+ **kwargs,
88
+ ):
89
+ super().__init__(
90
+ vocab_file,
91
+ tokenizer_file=tokenizer_file,
92
+ do_lower_case=do_lower_case,
93
+ unk_token=unk_token,
94
+ sep_token=sep_token,
95
+ pad_token=pad_token,
96
+ cls_token=cls_token,
97
+ mask_token=mask_token,
98
+ tokenize_chinese_chars=tokenize_chinese_chars,
99
+ strip_accents=strip_accents,
100
+ **kwargs,
101
+ )
102
+
103
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
104
+ if (
105
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
106
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
107
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
108
+ ):
109
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
110
+ normalizer_state["lowercase"] = do_lower_case
111
+ normalizer_state["strip_accents"] = strip_accents
112
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
113
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
114
+
115
+ self.do_lower_case = do_lower_case
116
+
117
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
118
+ """
119
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
120
+ adding special tokens. A BERT sequence has the following format:
121
+
122
+ - single sequence: `[CLS] X [SEP]`
123
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
124
+
125
+ Args:
126
+ token_ids_0 (`List[int]`):
127
+ List of IDs to which the special tokens will be added.
128
+ token_ids_1 (`List[int]`, *optional*):
129
+ Optional second list of IDs for sequence pairs.
130
+
131
+ Returns:
132
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
133
+ """
134
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
135
+
136
+ if token_ids_1 is not None:
137
+ output += token_ids_1 + [self.sep_token_id]
138
+
139
+ return output
140
+
141
+ def create_token_type_ids_from_sequences(
142
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
143
+ ) -> List[int]:
144
+ """
145
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
146
+ pair mask has the following format:
147
+
148
+ ```
149
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
150
+ | first sequence | second sequence |
151
+ ```
152
+
153
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
154
+
155
+ Args:
156
+ token_ids_0 (`List[int]`):
157
+ List of IDs.
158
+ token_ids_1 (`List[int]`, *optional*):
159
+ Optional second list of IDs for sequence pairs.
160
+
161
+ Returns:
162
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
163
+ """
164
+ sep = [self.sep_token_id]
165
+ cls = [self.cls_token_id]
166
+ if token_ids_1 is None:
167
+ return len(cls + token_ids_0 + sep) * [0]
168
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
169
+
170
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
171
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
172
+ return tuple(files)
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/tokenization_bert_tf.py ADDED
@@ -0,0 +1,254 @@
1
+ import os
2
+ from typing import List, Union
3
+
4
+ import tensorflow as tf
5
+ from tensorflow_text import BertTokenizer as BertTokenizerLayer
6
+ from tensorflow_text import FastBertTokenizer, ShrinkLongestTrimmer, case_fold_utf8, combine_segments, pad_model_inputs
7
+
8
+ from ...modeling_tf_utils import keras
9
+ from .tokenization_bert import BertTokenizer
10
+
11
+
12
+ class TFBertTokenizer(keras.layers.Layer):
13
+ """
14
+ This is an in-graph tokenizer for BERT. It should be initialized similarly to other tokenizers, using the
15
+ `from_pretrained()` method. It can also be initialized with the `from_tokenizer()` method, which imports settings
16
+ from an existing standard tokenizer object.
17
+
18
+ In-graph tokenizers, unlike other Hugging Face tokenizers, are actually Keras layers and are designed to be run
19
+ when the model is called, rather than during preprocessing. As a result, they have somewhat more limited options
20
+ than standard tokenizer classes. They are most useful when you want to create an end-to-end model that goes
21
+ straight from `tf.string` inputs to outputs.
22
+
23
+ Args:
24
+ vocab_list (`list`):
25
+ List containing the vocabulary.
26
+ do_lower_case (`bool`, *optional*, defaults to `True`):
27
+ Whether or not to lowercase the input when tokenizing.
28
+ cls_token_id (`int`, *optional*):
+ The id of the classifier token, which is prepended to every sequence built with special tokens. If not
+ provided, it is looked up as the index of `"[CLS]"` in `vocab_list`.
+ sep_token_id (`int`, *optional*):
+ The id of the separator token, which is appended after each segment when building sequences with special
+ tokens. If not provided, it is looked up as the index of `"[SEP]"` in `vocab_list`.
+ pad_token_id (`int`, *optional*):
+ The id of the token used for padding, for example when batching sequences of different lengths. If not
+ provided, it is looked up as the index of `"[PAD]"` in `vocab_list`.
37
+ padding (`str`, defaults to `"longest"`):
38
+ The type of padding to use. Can be either `"longest"`, to pad only up to the longest sample in the batch,
39
+ or `"max_length"`, to pad all inputs to the maximum length supported by the tokenizer.
40
+ truncation (`bool`, *optional*, defaults to `True`):
41
+ Whether to truncate the sequence to the maximum length.
42
+ max_length (`int`, *optional*, defaults to `512`):
43
+ The maximum length of the sequence, used for padding (if `padding` is "max_length") and/or truncation (if
44
+ `truncation` is `True`).
45
+ pad_to_multiple_of (`int`, *optional*, defaults to `None`):
46
+ If set, the sequence will be padded to a multiple of this value.
47
+ return_token_type_ids (`bool`, *optional*, defaults to `True`):
48
+ Whether to return token_type_ids.
49
+ return_attention_mask (`bool`, *optional*, defaults to `True`):
50
+ Whether to return the attention_mask.
51
+ use_fast_bert_tokenizer (`bool`, *optional*, defaults to `True`):
52
+ If True, will use the FastBertTokenizer class from Tensorflow Text. If False, will use the BertTokenizer
53
+ class instead. BertTokenizer supports some additional options, but is slower and cannot be exported to
54
+ TFLite.
55
+ """
56
+
57
+ def __init__(
58
+ self,
59
+ vocab_list: List,
60
+ do_lower_case: bool,
61
+ cls_token_id: int = None,
62
+ sep_token_id: int = None,
63
+ pad_token_id: int = None,
64
+ padding: str = "longest",
65
+ truncation: bool = True,
66
+ max_length: int = 512,
67
+ pad_to_multiple_of: int = None,
68
+ return_token_type_ids: bool = True,
69
+ return_attention_mask: bool = True,
70
+ use_fast_bert_tokenizer: bool = True,
71
+ **tokenizer_kwargs,
72
+ ):
73
+ super().__init__()
74
+ if use_fast_bert_tokenizer:
75
+ self.tf_tokenizer = FastBertTokenizer(
76
+ vocab_list, token_out_type=tf.int64, lower_case_nfd_strip_accents=do_lower_case, **tokenizer_kwargs
77
+ )
78
+ else:
79
+ lookup_table = tf.lookup.StaticVocabularyTable(
80
+ tf.lookup.KeyValueTensorInitializer(
81
+ keys=vocab_list,
82
+ key_dtype=tf.string,
83
+ values=tf.range(tf.size(vocab_list, out_type=tf.int64), dtype=tf.int64),
84
+ value_dtype=tf.int64,
85
+ ),
86
+ num_oov_buckets=1,
87
+ )
88
+ self.tf_tokenizer = BertTokenizerLayer(
89
+ lookup_table, token_out_type=tf.int64, lower_case=do_lower_case, **tokenizer_kwargs
90
+ )
91
+
92
+ self.vocab_list = vocab_list
93
+ self.do_lower_case = do_lower_case
94
+ self.cls_token_id = vocab_list.index("[CLS]") if cls_token_id is None else cls_token_id
95
+ self.sep_token_id = vocab_list.index("[SEP]") if sep_token_id is None else sep_token_id
96
+ self.pad_token_id = vocab_list.index("[PAD]") if pad_token_id is None else pad_token_id
97
+ self.paired_trimmer = ShrinkLongestTrimmer(max_length - 3, axis=1) # Allow room for special tokens
98
+ self.max_length = max_length
99
+ self.padding = padding
100
+ self.truncation = truncation
101
+ self.pad_to_multiple_of = pad_to_multiple_of
102
+ self.return_token_type_ids = return_token_type_ids
103
+ self.return_attention_mask = return_attention_mask
104
+
105
+ @classmethod
106
+ def from_tokenizer(cls, tokenizer: "PreTrainedTokenizerBase", **kwargs): # noqa: F821
107
+ """
108
+ Initialize a `TFBertTokenizer` from an existing `Tokenizer`.
109
+
110
+ Args:
111
+ tokenizer (`PreTrainedTokenizerBase`):
112
+ The tokenizer to use to initialize the `TFBertTokenizer`.
113
+
114
+ Examples:
115
+
116
+ ```python
117
+ from transformers import AutoTokenizer, TFBertTokenizer
118
+
119
+ tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
120
+ tf_tokenizer = TFBertTokenizer.from_tokenizer(tokenizer)
121
+ ```
122
+ """
123
+ do_lower_case = kwargs.pop("do_lower_case", None)
124
+ do_lower_case = tokenizer.do_lower_case if do_lower_case is None else do_lower_case
125
+ cls_token_id = kwargs.pop("cls_token_id", None)
126
+ cls_token_id = tokenizer.cls_token_id if cls_token_id is None else cls_token_id
127
+ sep_token_id = kwargs.pop("sep_token_id", None)
128
+ sep_token_id = tokenizer.sep_token_id if sep_token_id is None else sep_token_id
129
+ pad_token_id = kwargs.pop("pad_token_id", None)
130
+ pad_token_id = tokenizer.pad_token_id if pad_token_id is None else pad_token_id
131
+
132
+ vocab = tokenizer.get_vocab()
133
+ vocab = sorted(vocab.items(), key=lambda x: x[1])
134
+ vocab_list = [entry[0] for entry in vocab]
135
+ return cls(
136
+ vocab_list=vocab_list,
137
+ do_lower_case=do_lower_case,
138
+ cls_token_id=cls_token_id,
139
+ sep_token_id=sep_token_id,
140
+ pad_token_id=pad_token_id,
141
+ **kwargs,
142
+ )
143
+
144
+ @classmethod
145
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
146
+ """
147
+ Instantiate a `TFBertTokenizer` from a pre-trained tokenizer.
148
+
149
+ Args:
150
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
151
+ The name or path to the pre-trained tokenizer.
152
+
153
+ Examples:
154
+
155
+ ```python
156
+ from transformers import TFBertTokenizer
157
+
158
+ tf_tokenizer = TFBertTokenizer.from_pretrained("google-bert/bert-base-uncased")
159
+ ```
160
+ """
161
+ try:
162
+ tokenizer = BertTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
163
+ except: # noqa: E722
164
+ from .tokenization_bert_fast import BertTokenizerFast
165
+
166
+ tokenizer = BertTokenizerFast.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
167
+ return cls.from_tokenizer(tokenizer, **kwargs)
168
+
169
+ def unpaired_tokenize(self, texts):
170
+ if self.do_lower_case:
171
+ texts = case_fold_utf8(texts)
172
+ tokens = self.tf_tokenizer.tokenize(texts)
173
+ return tokens.merge_dims(1, -1)
174
+
175
+ def call(
176
+ self,
177
+ text,
178
+ text_pair=None,
179
+ padding=None,
180
+ truncation=None,
181
+ max_length=None,
182
+ pad_to_multiple_of=None,
183
+ return_token_type_ids=None,
184
+ return_attention_mask=None,
185
+ ):
186
+ if padding is None:
187
+ padding = self.padding
188
+ if padding not in ("longest", "max_length"):
189
+ raise ValueError("Padding must be either 'longest' or 'max_length'!")
190
+ if max_length is not None and text_pair is not None:
191
+ # Because we have to instantiate a Trimmer to do it properly
192
+ raise ValueError("max_length cannot be overridden at call time when truncating paired texts!")
193
+ if max_length is None:
194
+ max_length = self.max_length
195
+ if truncation is None:
196
+ truncation = self.truncation
197
+ if pad_to_multiple_of is None:
198
+ pad_to_multiple_of = self.pad_to_multiple_of
199
+ if return_token_type_ids is None:
200
+ return_token_type_ids = self.return_token_type_ids
201
+ if return_attention_mask is None:
202
+ return_attention_mask = self.return_attention_mask
203
+ if not isinstance(text, tf.Tensor):
204
+ text = tf.convert_to_tensor(text)
205
+ if text_pair is not None and not isinstance(text_pair, tf.Tensor):
206
+ text_pair = tf.convert_to_tensor(text_pair)
207
+ if text_pair is not None:
208
+ if text.shape.rank > 1:
209
+ raise ValueError("text argument should not be multidimensional when a text pair is supplied!")
210
+ if text_pair.shape.rank > 1:
211
+ raise ValueError("text_pair should not be multidimensional!")
212
+ if text.shape.rank == 2:
213
+ text, text_pair = text[:, 0], text[:, 1]
214
+ text = self.unpaired_tokenize(text)
215
+ if text_pair is None: # Unpaired text
216
+ if truncation:
217
+ text = text[:, : max_length - 2] # Allow room for special tokens
218
+ input_ids, token_type_ids = combine_segments(
219
+ (text,), start_of_sequence_id=self.cls_token_id, end_of_segment_id=self.sep_token_id
220
+ )
221
+ else: # Paired text
222
+ text_pair = self.unpaired_tokenize(text_pair)
223
+ if truncation:
224
+ text, text_pair = self.paired_trimmer.trim([text, text_pair])
225
+ input_ids, token_type_ids = combine_segments(
226
+ (text, text_pair), start_of_sequence_id=self.cls_token_id, end_of_segment_id=self.sep_token_id
227
+ )
228
+ if padding == "longest":
229
+ pad_length = input_ids.bounding_shape(axis=1)
230
+ if pad_to_multiple_of is not None:
231
+ # No ceiling division in tensorflow, so we negate floordiv instead
232
+ pad_length = pad_to_multiple_of * (-tf.math.floordiv(-pad_length, pad_to_multiple_of))
233
+ else:
234
+ pad_length = max_length
235
+
236
+ input_ids, attention_mask = pad_model_inputs(input_ids, max_seq_length=pad_length, pad_value=self.pad_token_id)
237
+ output = {"input_ids": input_ids}
238
+ if return_attention_mask:
239
+ output["attention_mask"] = attention_mask
240
+ if return_token_type_ids:
241
+ token_type_ids, _ = pad_model_inputs(
242
+ token_type_ids, max_seq_length=pad_length, pad_value=self.pad_token_id
243
+ )
244
+ output["token_type_ids"] = token_type_ids
245
+ return output
246
+
247
+ def get_config(self):
248
+ return {
249
+ "vocab_list": self.vocab_list,
250
+ "do_lower_case": self.do_lower_case,
251
+ "cls_token_id": self.cls_token_id,
252
+ "sep_token_id": self.sep_token_id,
253
+ "pad_token_id": self.pad_token_id,
254
+ }
llmeval-env/lib/python3.10/site-packages/transformers/models/byt5/__init__.py ADDED
@@ -0,0 +1,28 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import _LazyModule
18
+
19
+
20
+ _import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}
21
+
22
+
23
+ if TYPE_CHECKING:
24
+ from .tokenization_byt5 import ByT5Tokenizer
25
+ else:
26
+ import sys
27
+
28
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llmeval-env/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (506 Bytes).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/convert_byt5_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.39 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/tokenization_byt5.cpython-310.pyc ADDED
Binary file (9.23 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/byt5/convert_byt5_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,60 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The T5 authors and HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert T5 checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
21
+ from transformers.utils import logging
22
+
23
+
24
+ logging.set_verbosity_info()
25
+
26
+
27
+ def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
28
+ # Initialise PyTorch model
29
+ config = T5Config.from_json_file(config_file)
30
+ print(f"Building PyTorch model from configuration: {config}")
31
+ model = T5ForConditionalGeneration(config)
32
+
33
+ # Load weights from tf checkpoint
34
+ load_tf_weights_in_t5(model, config, tf_checkpoint_path)
35
+
36
+ # Save pytorch-model
37
+ print(f"Save PyTorch model to {pytorch_dump_path}")
38
+ model.save_pretrained(pytorch_dump_path)
39
+
40
+
41
+ if __name__ == "__main__":
42
+ parser = argparse.ArgumentParser()
43
+ # Required parameters
44
+ parser.add_argument(
45
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
46
+ )
47
+ parser.add_argument(
48
+ "--config_file",
49
+ default=None,
50
+ type=str,
51
+ required=True,
52
+ help=(
53
+ "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
54
+ ),
55
+ )
56
+ parser.add_argument(
57
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
58
+ )
59
+ args = parser.parse_args()
60
+ convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
llmeval-env/lib/python3.10/site-packages/transformers/models/byt5/tokenization_byt5.py ADDED
@@ -0,0 +1,234 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 T5 Authors and HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization class for model ByT5."""
16
+
17
+
18
+ import warnings
19
+ from typing import List, Optional, Tuple
20
+
21
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ class ByT5Tokenizer(PreTrainedTokenizer):
29
+ """
30
+ Construct a ByT5 tokenizer. ByT5 simply uses raw bytes utf-8 encoding.
31
+
32
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
33
+ this superclass for more information regarding those methods.
34
+
35
+ Args:
36
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
37
+ The end of sequence token.
38
+
39
+ <Tip>
40
+
41
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
42
+ The token used is the `sep_token`.
43
+
44
+ </Tip>
45
+
46
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
47
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
48
+ token instead.
49
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
50
+ The token used for padding, for example when batching sequences of different lengths.
51
+ extra_ids (`int`, *optional*, defaults to 125):
52
+ Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are
53
+ accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are
54
+ indexed from the end of the vocabulary up to beginning ("<extra_id_0>" is the last token in the vocabulary
55
+ like in ByT5 preprocessing see
56
+ [here](https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117)).
57
+ additional_special_tokens (`List[str]`, *optional*):
58
+ Additional special tokens used by the tokenizer.
59
+ """
60
+
61
+ model_input_names = ["input_ids", "attention_mask"]
62
+
63
+ def __init__(
64
+ self,
65
+ eos_token="</s>",
66
+ unk_token="<unk>",
67
+ pad_token="<pad>",
68
+ extra_ids=125,
69
+ additional_special_tokens=None,
70
+ **kwargs,
71
+ ) -> None:
72
+ # Add extra_ids to the special token list
73
+ if extra_ids > 0 and additional_special_tokens is None:
74
+ additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
75
+ elif extra_ids > 0 and additional_special_tokens is not None and len(additional_special_tokens) > 0:
76
+ # Check that we have the right number of extra_id special tokens
77
+ extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
78
+ if extra_tokens != extra_ids:
79
+ raise ValueError(
80
+ f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
81
+ " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
82
+ " extra_ids tokens"
83
+ )
84
+
85
+ pad_token = AddedToken(pad_token, lstrip=True, rstrip=True) if isinstance(pad_token, str) else pad_token
86
+ # We force left and right stripping for backward compatibility. The ByT5 tests depend on this.
87
+ eos_token = AddedToken(eos_token, lstrip=True, rstrip=True) if isinstance(eos_token, str) else eos_token
88
+ unk_token = AddedToken(unk_token, lstrip=True, rstrip=True) if isinstance(unk_token, str) else unk_token
89
+ # unk token needs to be in the vocab with correct index
90
+ self._added_tokens_decoder = {0: pad_token, 1: eos_token, 2: unk_token}
91
+ self.offset = len(self._added_tokens_decoder)
92
+ self._utf_vocab_size = 2**8 # utf is 8 bits
93
+ super().__init__(
94
+ eos_token=eos_token,
95
+ unk_token=unk_token,
96
+ pad_token=pad_token,
97
+ extra_ids=0,
98
+ additional_special_tokens=additional_special_tokens, # TODO extra ids are not used :sweat_smile:
99
+ **kwargs,
100
+ )
101
+
102
+ @property
103
+ def vocab_size(self):
104
+ return self._utf_vocab_size
105
+
106
+ def get_vocab(self):
107
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size + self.offset)}
108
+ vocab.update(self.added_tokens_encoder)
109
+ return vocab
110
+
111
+ def get_special_tokens_mask(
112
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
113
+ ) -> List[int]:
114
+ """
115
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
116
+ special tokens using the tokenizer `prepare_for_model` method.
117
+
118
+ Args:
119
+ token_ids_0 (`List[int]`):
120
+ List of IDs.
121
+ token_ids_1 (`List[int]`, *optional*):
122
+ Optional second list of IDs for sequence pairs.
123
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
124
+ Whether or not the token list is already formatted with special tokens for the model.
125
+
126
+ Returns:
127
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
128
+ """
129
+ if already_has_special_tokens:
130
+ return super().get_special_tokens_mask(
131
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
132
+ )
133
+
134
+ # normal case: some special tokens
135
+ if token_ids_1 is None:
136
+ return ([0] * len(token_ids_0)) + [1]
137
+ return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
138
+
139
+ def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
140
+ """Do not add eos again if user already added it."""
141
+ if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
142
+ warnings.warn(
143
+ f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
144
+ " eos tokens being added."
145
+ )
146
+ return token_ids
147
+ else:
148
+ return token_ids + [self.eos_token_id]
149
+
150
+ def create_token_type_ids_from_sequences(
151
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
152
+ ) -> List[int]:
153
+ """
154
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. ByT5 does not
155
+ make use of token type ids, therefore a list of zeros is returned.
156
+
157
+ Args:
158
+ token_ids_0 (`List[int]`):
159
+ List of IDs.
160
+ token_ids_1 (`List[int]`, *optional*):
161
+ Optional second list of IDs for sequence pairs.
162
+
163
+ Returns:
164
+ `List[int]`: List of zeros.
165
+ """
166
+ eos = [self.eos_token_id]
167
+
168
+ if token_ids_1 is None:
169
+ return len(token_ids_0 + eos) * [0]
170
+ return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
171
+
172
+ def build_inputs_with_special_tokens(
173
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
174
+ ) -> List[int]:
175
+ """
176
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
177
+ adding special tokens. A sequence has the following format:
178
+
179
+ - single sequence: `X </s>`
180
+ - pair of sequences: `A </s> B </s>`
181
+
182
+ Args:
183
+ token_ids_0 (`List[int]`):
184
+ List of IDs to which the special tokens will be added.
185
+ token_ids_1 (`List[int]`, *optional*):
186
+ Optional second list of IDs for sequence pairs.
187
+
188
+ Returns:
189
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
190
+ """
191
+ token_ids_0 = self._add_eos_if_not_present(token_ids_0)
192
+ if token_ids_1 is None:
193
+ return token_ids_0
194
+ else:
195
+ token_ids_1 = self._add_eos_if_not_present(token_ids_1)
196
+ return token_ids_0 + token_ids_1
197
+
198
+ def _tokenize(self, text: str) -> List[str]:
199
+ """Take as input a string and return a list of strings (tokens) for words/sub-words"""
200
+ tokens = [chr(i) for i in text.encode("utf-8")]
201
+ return tokens
202
+
203
+ def _convert_token_to_id(self, token):
204
+ """Converts a token (str) to an id using the vocab."""
205
+
206
+ if len(token) != 1:
207
+ token_id = None
208
+ else:
209
+ token_id = ord(token) + self.offset
210
+
211
+ return token_id
212
+
213
+ def _convert_id_to_token(self, index):
214
+ """Converts an index (integer) to a token (str) using the vocab."""
215
+ token = chr(index - self.offset)
216
+ return token
217
+
218
+ def convert_tokens_to_string(self, tokens):
219
+ """Converts a sequence of tokens (string) into a single string."""
220
+ bstring = b""
221
+ for token in tokens:
222
+ if token in self.added_tokens_decoder:
223
+ tok_string = self.added_tokens_decoder[token].encode("utf-8")
224
+ elif token in self.added_tokens_encoder:
225
+ tok_string = token.encode("utf-8")
226
+ else:
227
+ tok_string = bytes([ord(token)])
228
+ bstring += tok_string
229
+ string = bstring.decode("utf-8", errors="ignore")
230
+ return string
231
+
232
+ # ByT5Tokenizer has no vocab file
233
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
234
+ return ()
llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/__init__.py ADDED
@@ -0,0 +1,81 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
17
+
18
+
19
+ _import_structure = {"configuration_cvt": ["CVT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CvtConfig"]}
20
+
21
+
22
+ try:
23
+ if not is_torch_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["modeling_cvt"] = [
29
+ "CVT_PRETRAINED_MODEL_ARCHIVE_LIST",
30
+ "CvtForImageClassification",
31
+ "CvtModel",
32
+ "CvtPreTrainedModel",
33
+ ]
34
+
35
+ try:
36
+ if not is_tf_available():
37
+ raise OptionalDependencyNotAvailable()
38
+ except OptionalDependencyNotAvailable:
39
+ pass
40
+ else:
41
+ _import_structure["modeling_tf_cvt"] = [
42
+ "TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST",
43
+ "TFCvtForImageClassification",
44
+ "TFCvtModel",
45
+ "TFCvtPreTrainedModel",
46
+ ]
47
+
48
+ if TYPE_CHECKING:
49
+ from .configuration_cvt import CVT_PRETRAINED_CONFIG_ARCHIVE_MAP, CvtConfig
50
+
51
+ try:
52
+ if not is_torch_available():
53
+ raise OptionalDependencyNotAvailable()
54
+ except OptionalDependencyNotAvailable:
55
+ pass
56
+ else:
57
+ from .modeling_cvt import (
58
+ CVT_PRETRAINED_MODEL_ARCHIVE_LIST,
59
+ CvtForImageClassification,
60
+ CvtModel,
61
+ CvtPreTrainedModel,
62
+ )
63
+
64
+ try:
65
+ if not is_tf_available():
66
+ raise OptionalDependencyNotAvailable()
67
+ except OptionalDependencyNotAvailable:
68
+ pass
69
+ else:
70
+ from .modeling_tf_cvt import (
71
+ TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST,
72
+ TFCvtForImageClassification,
73
+ TFCvtModel,
74
+ TFCvtPreTrainedModel,
75
+ )
76
+
77
+
78
+ else:
79
+ import sys
80
+
81
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.21 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/configuration_cvt.cpython-310.pyc ADDED
Binary file (6.01 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/convert_cvt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (9.45 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/modeling_cvt.cpython-310.pyc ADDED
Binary file (22 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/__pycache__/modeling_tf_cvt.cpython-310.pyc ADDED
Binary file (34.3 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/configuration_cvt.py ADDED
@@ -0,0 +1,146 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ CvT model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import CVT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class CvtConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`CvtModel`]. It is used to instantiate a CvT model
30
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
31
+ defaults will yield a similar configuration to that of the CvT
32
+ [microsoft/cvt-13](https://huggingface.co/microsoft/cvt-13) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+ Args:
38
+ num_channels (`int`, *optional*, defaults to 3):
39
+ The number of input channels.
40
+ patch_sizes (`List[int]`, *optional*, defaults to `[7, 3, 3]`):
41
+ The kernel size of each encoder's patch embedding.
42
+ patch_stride (`List[int]`, *optional*, defaults to `[4, 2, 2]`):
43
+ The stride size of each encoder's patch embedding.
44
+ patch_padding (`List[int]`, *optional*, defaults to `[2, 1, 1]`):
45
+ The padding size of each encoder's patch embedding.
46
+ embed_dim (`List[int]`, *optional*, defaults to `[64, 192, 384]`):
47
+ Dimension of each of the encoder blocks.
48
+ num_heads (`List[int]`, *optional*, defaults to `[1, 3, 6]`):
49
+ Number of attention heads for each attention layer in each block of the Transformer encoder.
50
+ depth (`List[int]`, *optional*, defaults to `[1, 2, 10]`):
51
+ The number of layers in each encoder block.
52
+ mlp_ratios (`List[float]`, *optional*, defaults to `[4.0, 4.0, 4.0, 4.0]`):
53
+ Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
54
+ encoder blocks.
55
+ attention_drop_rate (`List[float]`, *optional*, defaults to `[0.0, 0.0, 0.0]`):
56
+ The dropout ratio for the attention probabilities.
57
+ drop_rate (`List[float]`, *optional*, defaults to `[0.0, 0.0, 0.0]`):
58
+ The dropout ratio for the patch embeddings probabilities.
59
+ drop_path_rate (`List[float]`, *optional*, defaults to `[0.0, 0.0, 0.1]`):
60
+ The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
61
+ qkv_bias (`List[bool]`, *optional*, defaults to `[True, True, True]`):
62
+ The bias bool for query, key and value in attentions
63
+ cls_token (`List[bool]`, *optional*, defaults to `[False, False, True]`):
64
+ Whether or not to add a classification token to the output of each of the last 3 stages.
65
+ qkv_projection_method (`List[str]`, *optional*, defaults to `["dw_bn", "dw_bn", "dw_bn"]`):
+ The projection method for query, key and value. Default is depth-wise convolutions with batch norm. For
+ linear projection use `"avg"`.
68
+ kernel_qkv (`List[int]`, *optional*, defaults to `[3, 3, 3]`):
69
+ The kernel size for query, key and value in attention layer
70
+ padding_kv (`List[int]`, *optional*, defaults to `[1, 1, 1]`):
71
+ The padding size for key and value in attention layer
72
+ stride_kv (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
73
+ The stride size for key and value in attention layer
74
+ padding_q (`List[int]`, *optional*, defaults to `[1, 1, 1]`):
75
+ The padding size for query in attention layer
76
+ stride_q (`List[int]`, *optional*, defaults to `[1, 1, 1]`):
77
+ The stride size for query in attention layer
78
+ initializer_range (`float`, *optional*, defaults to 0.02):
79
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
80
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
81
+ The epsilon used by the layer normalization layers.
82
+
83
+ Example:
84
+
85
+ ```python
86
+ >>> from transformers import CvtConfig, CvtModel
87
+
88
+ >>> # Initializing a CvT microsoft/cvt-13 style configuration
89
+ >>> configuration = CvtConfig()
90
+
91
+ >>> # Initializing a model (with random weights) from the microsoft/cvt-13 style configuration
92
+ >>> model = CvtModel(configuration)
93
+
94
+ >>> # Accessing the model configuration
95
+ >>> configuration = model.config
96
+ ```"""
97
+
98
+ model_type = "cvt"
99
+
100
+ def __init__(
101
+ self,
102
+ num_channels=3,
103
+ patch_sizes=[7, 3, 3],
104
+ patch_stride=[4, 2, 2],
105
+ patch_padding=[2, 1, 1],
106
+ embed_dim=[64, 192, 384],
107
+ num_heads=[1, 3, 6],
108
+ depth=[1, 2, 10],
109
+ mlp_ratio=[4.0, 4.0, 4.0],
110
+ attention_drop_rate=[0.0, 0.0, 0.0],
111
+ drop_rate=[0.0, 0.0, 0.0],
112
+ drop_path_rate=[0.0, 0.0, 0.1],
113
+ qkv_bias=[True, True, True],
114
+ cls_token=[False, False, True],
115
+ qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
116
+ kernel_qkv=[3, 3, 3],
117
+ padding_kv=[1, 1, 1],
118
+ stride_kv=[2, 2, 2],
119
+ padding_q=[1, 1, 1],
120
+ stride_q=[1, 1, 1],
121
+ initializer_range=0.02,
122
+ layer_norm_eps=1e-12,
123
+ **kwargs,
124
+ ):
125
+ super().__init__(**kwargs)
126
+ self.num_channels = num_channels
127
+ self.patch_sizes = patch_sizes
128
+ self.patch_stride = patch_stride
129
+ self.patch_padding = patch_padding
130
+ self.embed_dim = embed_dim
131
+ self.num_heads = num_heads
132
+ self.depth = depth
133
+ self.mlp_ratio = mlp_ratio
134
+ self.attention_drop_rate = attention_drop_rate
135
+ self.drop_rate = drop_rate
136
+ self.drop_path_rate = drop_path_rate
137
+ self.qkv_bias = qkv_bias
138
+ self.cls_token = cls_token
139
+ self.qkv_projection_method = qkv_projection_method
140
+ self.kernel_qkv = kernel_qkv
141
+ self.padding_kv = padding_kv
142
+ self.stride_kv = stride_kv
143
+ self.padding_q = padding_q
144
+ self.stride_q = stride_q
145
+ self.initializer_range = initializer_range
146
+ self.layer_norm_eps = layer_norm_eps
llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,362 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert CvT checkpoints from the original repository.
16
+
17
+ URL: https://github.com/microsoft/CvT"""
18
+
19
+
20
+ import argparse
21
+ import json
22
+ from collections import OrderedDict
23
+
24
+ import torch
25
+ from huggingface_hub import cached_download, hf_hub_url
26
+
27
+ from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
28
+
29
+
30
+ def embeddings(idx):
31
+ """
32
+ The function helps in renaming embedding layer weights.
33
+
34
+ Args:
35
+ idx: stage number in original model
36
+ """
37
+ embed = []
38
+ embed.append(
39
+ (
40
+ f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
41
+ f"stage{idx}.patch_embed.proj.weight",
42
+ )
43
+ )
44
+ embed.append(
45
+ (
46
+ f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
47
+ f"stage{idx}.patch_embed.proj.bias",
48
+ )
49
+ )
50
+ embed.append(
51
+ (
52
+ f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
53
+ f"stage{idx}.patch_embed.norm.weight",
54
+ )
55
+ )
56
+ embed.append(
57
+ (
58
+ f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
59
+ f"stage{idx}.patch_embed.norm.bias",
60
+ )
61
+ )
62
+ return embed
63
+
64
+
65
+ def attention(idx, cnt):
66
+ """
67
+ The function helps in renaming attention block layers weights.
68
+
69
+ Args:
70
+ idx: stage number in original model
71
+ cnt: count of blocks in each stage
72
+ """
73
+ attention_weights = []
74
+ attention_weights.append(
75
+ (
76
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
77
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
78
+ )
79
+ )
80
+ attention_weights.append(
81
+ (
82
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
83
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
84
+ )
85
+ )
86
+ attention_weights.append(
87
+ (
88
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
89
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
90
+ )
91
+ )
92
+ attention_weights.append(
93
+ (
94
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
95
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
96
+ )
97
+ )
98
+ attention_weights.append(
99
+ (
100
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
101
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
102
+ )
103
+ )
104
+ attention_weights.append(
105
+ (
106
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
107
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
108
+ )
109
+ )
110
+ attention_weights.append(
111
+ (
112
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
113
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
114
+ )
115
+ )
116
+ attention_weights.append(
117
+ (
118
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
119
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
120
+ )
121
+ )
122
+ attention_weights.append(
123
+ (
124
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
125
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
126
+ )
127
+ )
128
+ attention_weights.append(
129
+ (
130
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
131
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
132
+ )
133
+ )
134
+ attention_weights.append(
135
+ (
136
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
137
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
138
+ )
139
+ )
140
+ attention_weights.append(
141
+ (
142
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
143
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
144
+ )
145
+ )
146
+ attention_weights.append(
147
+ (
148
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
149
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
150
+ )
151
+ )
152
+ attention_weights.append(
153
+ (
154
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
155
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
156
+ )
157
+ )
158
+ attention_weights.append(
159
+ (
160
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
161
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
162
+ )
163
+ )
164
+ attention_weights.append(
165
+ (
166
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
167
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
168
+ )
169
+ )
170
+ attention_weights.append(
171
+ (
172
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
173
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
174
+ )
175
+ )
176
+ attention_weights.append(
177
+ (
178
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
179
+ f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
180
+ )
181
+ )
182
+ attention_weights.append(
183
+ (
184
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
185
+ f"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
186
+ )
187
+ )
188
+ attention_weights.append(
189
+ (
190
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
191
+ f"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
192
+ )
193
+ )
194
+ attention_weights.append(
195
+ (
196
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
197
+ f"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
198
+ )
199
+ )
200
+ attention_weights.append(
201
+ (
202
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
203
+ f"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
204
+ )
205
+ )
206
+ attention_weights.append(
207
+ (
208
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
209
+ f"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
210
+ )
211
+ )
212
+ attention_weights.append(
213
+ (
214
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
215
+ f"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
216
+ )
217
+ )
218
+ attention_weights.append(
219
+ (
220
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
221
+ f"stage{idx}.blocks.{cnt}.attn.proj.weight",
222
+ )
223
+ )
224
+ attention_weights.append(
225
+ (
226
+ f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
227
+ f"stage{idx}.blocks.{cnt}.attn.proj.bias",
228
+ )
229
+ )
230
+ attention_weights.append(
231
+ (f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc1.weight")
232
+ )
233
+ attention_weights.append(
234
+ (f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc1.bias")
235
+ )
236
+ attention_weights.append(
237
+ (f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc2.weight")
238
+ )
239
+ attention_weights.append(
240
+ (f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc2.bias")
241
+ )
242
+ attention_weights.append(
243
+ (f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", f"stage{idx}.blocks.{cnt}.norm1.weight")
244
+ )
245
+ attention_weights.append(
246
+ (f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", f"stage{idx}.blocks.{cnt}.norm1.bias")
247
+ )
248
+ attention_weights.append(
249
+ (f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", f"stage{idx}.blocks.{cnt}.norm2.weight")
250
+ )
251
+ attention_weights.append(
252
+ (f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", f"stage{idx}.blocks.{cnt}.norm2.bias")
253
+ )
254
+ return attention_weights
255
+
256
+
257
+ def cls_token(idx):
258
+ """
259
+ Function helps in renaming cls_token weights
260
+ """
261
+ token = []
262
+ token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
263
+ return token
264
+
265
+
266
+ def final():
267
+ """
268
+ Function helps in renaming final classification layer
269
+ """
270
+ head = []
271
+ head.append(("layernorm.weight", "norm.weight"))
272
+ head.append(("layernorm.bias", "norm.bias"))
273
+ head.append(("classifier.weight", "head.weight"))
274
+ head.append(("classifier.bias", "head.bias"))
275
+ return head
276
+
277
+
278
+ def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
279
+ """
280
+ Function to convert the Microsoft CvT checkpoint to a Hugging Face checkpoint
281
+ """
282
+ img_labels_file = "imagenet-1k-id2label.json"
283
+ num_labels = 1000
284
+
285
+ repo_id = "huggingface/label-files"
287
+ id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
288
+ id2label = {int(k): v for k, v in id2label.items()}
289
+
291
+ label2id = {v: k for k, v in id2label.items()}
292
+
293
+ config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
294
+
295
+ # For depth size 13 (13 = 1+2+10)
296
+ if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
297
+ config.depth = [1, 2, 10]
298
+
299
+ # For depth size 21 (21 = 1+4+16)
300
+ elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
301
+ config.depth = [1, 4, 16]
302
+
303
+ # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
304
+ else:
305
+ config.depth = [2, 2, 20]
306
+ config.num_heads = [3, 12, 16]
307
+ config.embed_dim = [192, 768, 1024]
308
+
309
+ model = CvtForImageClassification(config)
310
+ image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
311
+ image_processor.size["shortest_edge"] = image_size
312
+ original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
313
+
314
+ huggingface_weights = OrderedDict()
315
+ list_of_state_dict = []
316
+
317
+ for idx in range(len(config.depth)):
318
+ if config.cls_token[idx]:
319
+ list_of_state_dict = list_of_state_dict + cls_token(idx)
320
+ list_of_state_dict = list_of_state_dict + embeddings(idx)
321
+ for cnt in range(config.depth[idx]):
322
+ list_of_state_dict = list_of_state_dict + attention(idx, cnt)
323
+
324
+ list_of_state_dict = list_of_state_dict + final()
325
+ for gg in list_of_state_dict:
326
+ print(gg)
327
+ for i in range(len(list_of_state_dict)):
328
+ huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
329
+
330
+ model.load_state_dict(huggingface_weights)
331
+ model.save_pretrained(pytorch_dump_folder)
332
+ image_processor.save_pretrained(pytorch_dump_folder)
333
+
334
+
335
+ # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
336
+
337
+ if __name__ == "__main__":
338
+ parser = argparse.ArgumentParser()
339
+ parser.add_argument(
340
+ "--cvt_model",
341
+ default="cvt-w24",
342
+ type=str,
343
+ help="Name of the cvt model you'd like to convert.",
344
+ )
345
+ parser.add_argument(
346
+ "--image_size",
347
+ default=384,
348
+ type=int,
349
+ help="Input Image Size",
350
+ )
351
+ parser.add_argument(
352
+ "--cvt_file_name",
353
+ default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
354
+ type=str,
355
+ help="Input Image Size",
356
+ )
357
+ parser.add_argument(
358
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
359
+ )
360
+
361
+ args = parser.parse_args()
362
+ convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/modeling_cvt.py ADDED
@@ -0,0 +1,725 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch CvT model."""
16
+
17
+
18
+ import collections.abc
19
+ from dataclasses import dataclass
20
+ from typing import Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
28
+ from ...modeling_outputs import ImageClassifierOutputWithNoAttention, ModelOutput
29
+ from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
30
+ from ...utils import logging
31
+ from .configuration_cvt import CvtConfig
32
+
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+ # General docstring
37
+ _CONFIG_FOR_DOC = "CvtConfig"
38
+
39
+ # Base docstring
40
+ _CHECKPOINT_FOR_DOC = "microsoft/cvt-13"
41
+ _EXPECTED_OUTPUT_SHAPE = [1, 384, 14, 14]
42
+
43
+ # Image classification docstring
44
+ _IMAGE_CLASS_CHECKPOINT = "microsoft/cvt-13"
45
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
46
+
47
+
48
+ from ..deprecated._archive_maps import CVT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
49
+
50
+
51
+ @dataclass
52
+ class BaseModelOutputWithCLSToken(ModelOutput):
53
+ """
54
+ Base class for model's outputs, with potential hidden states and attentions.
55
+
56
+ Args:
57
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
58
+ Sequence of hidden-states at the output of the last layer of the model.
59
+ cls_token_value (`torch.FloatTensor` of shape `(batch_size, 1, hidden_size)`):
60
+ Classification token at the output of the last layer of the model.
61
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
62
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
63
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
64
+ plus the initial embedding outputs.
65
+ """
66
+
67
+ last_hidden_state: torch.FloatTensor = None
68
+ cls_token_value: torch.FloatTensor = None
69
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
70
+
71
+
72
+ # Copied from transformers.models.beit.modeling_beit.drop_path
73
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
74
+ """
75
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
76
+
77
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
78
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
79
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
80
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
81
+ argument.
82
+ """
83
+ if drop_prob == 0.0 or not training:
84
+ return input
85
+ keep_prob = 1 - drop_prob
86
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
87
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
88
+ random_tensor.floor_() # binarize
89
+ output = input.div(keep_prob) * random_tensor
90
+ return output
91
+
92
+
93
+ # Copied from transformers.models.beit.modeling_beit.BeitDropPath
94
+ class CvtDropPath(nn.Module):
95
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
96
+
97
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
98
+ super().__init__()
99
+ self.drop_prob = drop_prob
100
+
101
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
102
+ return drop_path(hidden_states, self.drop_prob, self.training)
103
+
104
+ def extra_repr(self) -> str:
105
+ return "p={}".format(self.drop_prob)
106
+
107
+
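For reference, the stochastic-depth behaviour implemented by drop_path / CvtDropPath above can be checked in isolation: at evaluation time the layer is the identity, while during training each sample in the batch either keeps its residual branch (rescaled by 1 / keep_prob) or has it zeroed out entirely. A minimal sketch, assuming only torch and the drop_path function defined above:

import torch

x = torch.ones(4, 3, 2)  # toy batch: 4 samples, 3 tokens, 2 channels

# Eval mode (training=False): the input passes through unchanged.
assert torch.equal(drop_path(x, drop_prob=0.5, training=False), x)

# Training mode: with keep_prob = 0.5, each sample is either dropped (0.0)
# or rescaled by 1 / keep_prob = 2.0, so the expected value stays at 1.0.
out = drop_path(x, drop_prob=0.5, training=True)
print(out[:, 0, 0])  # e.g. tensor([2., 0., 2., 2.]) -- varies per run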
108
+ class CvtEmbeddings(nn.Module):
109
+ """
110
+ Construct the CvT embeddings.
111
+ """
112
+
113
+ def __init__(self, patch_size, num_channels, embed_dim, stride, padding, dropout_rate):
114
+ super().__init__()
115
+ self.convolution_embeddings = CvtConvEmbeddings(
116
+ patch_size=patch_size, num_channels=num_channels, embed_dim=embed_dim, stride=stride, padding=padding
117
+ )
118
+ self.dropout = nn.Dropout(dropout_rate)
119
+
120
+ def forward(self, pixel_values):
121
+ hidden_state = self.convolution_embeddings(pixel_values)
122
+ hidden_state = self.dropout(hidden_state)
123
+ return hidden_state
124
+
125
+
126
+ class CvtConvEmbeddings(nn.Module):
127
+ """
128
+ Image to Conv Embedding.
129
+ """
130
+
131
+ def __init__(self, patch_size, num_channels, embed_dim, stride, padding):
132
+ super().__init__()
133
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
134
+ self.patch_size = patch_size
135
+ self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=stride, padding=padding)
136
+ self.normalization = nn.LayerNorm(embed_dim)
137
+
138
+ def forward(self, pixel_values):
139
+ pixel_values = self.projection(pixel_values)
140
+ batch_size, num_channels, height, width = pixel_values.shape
141
+ hidden_size = height * width
142
+ # rearrange "b c h w -> b (h w) c"
143
+ pixel_values = pixel_values.view(batch_size, num_channels, hidden_size).permute(0, 2, 1)
144
+ if self.normalization:
145
+ pixel_values = self.normalization(pixel_values)
146
+ # rearrange "b (h w) c" -> b c h w"
147
+ pixel_values = pixel_values.permute(0, 2, 1).view(batch_size, num_channels, height, width)
148
+ return pixel_values
149
+
150
+
151
+ class CvtSelfAttentionConvProjection(nn.Module):
152
+ def __init__(self, embed_dim, kernel_size, padding, stride):
153
+ super().__init__()
154
+ self.convolution = nn.Conv2d(
155
+ embed_dim,
156
+ embed_dim,
157
+ kernel_size=kernel_size,
158
+ padding=padding,
159
+ stride=stride,
160
+ bias=False,
161
+ groups=embed_dim,
162
+ )
163
+ self.normalization = nn.BatchNorm2d(embed_dim)
164
+
165
+ def forward(self, hidden_state):
166
+ hidden_state = self.convolution(hidden_state)
167
+ hidden_state = self.normalization(hidden_state)
168
+ return hidden_state
169
+
170
+
171
+ class CvtSelfAttentionLinearProjection(nn.Module):
172
+ def forward(self, hidden_state):
173
+ batch_size, num_channels, height, width = hidden_state.shape
174
+ hidden_size = height * width
175
+ # rearrange " b c h w -> b (h w) c"
176
+ hidden_state = hidden_state.view(batch_size, num_channels, hidden_size).permute(0, 2, 1)
177
+ return hidden_state
178
+
179
+
180
+ class CvtSelfAttentionProjection(nn.Module):
181
+ def __init__(self, embed_dim, kernel_size, padding, stride, projection_method="dw_bn"):
182
+ super().__init__()
183
+ if projection_method == "dw_bn":
184
+ self.convolution_projection = CvtSelfAttentionConvProjection(embed_dim, kernel_size, padding, stride)
185
+ self.linear_projection = CvtSelfAttentionLinearProjection()
186
+
187
+ def forward(self, hidden_state):
188
+ hidden_state = self.convolution_projection(hidden_state)
189
+ hidden_state = self.linear_projection(hidden_state)
190
+ return hidden_state
191
+
192
+
193
+ class CvtSelfAttention(nn.Module):
194
+ def __init__(
195
+ self,
196
+ num_heads,
197
+ embed_dim,
198
+ kernel_size,
199
+ padding_q,
200
+ padding_kv,
201
+ stride_q,
202
+ stride_kv,
203
+ qkv_projection_method,
204
+ qkv_bias,
205
+ attention_drop_rate,
206
+ with_cls_token=True,
207
+ **kwargs,
208
+ ):
209
+ super().__init__()
210
+ self.scale = embed_dim**-0.5
211
+ self.with_cls_token = with_cls_token
212
+ self.embed_dim = embed_dim
213
+ self.num_heads = num_heads
214
+
215
+ self.convolution_projection_query = CvtSelfAttentionProjection(
216
+ embed_dim,
217
+ kernel_size,
218
+ padding_q,
219
+ stride_q,
220
+ projection_method="linear" if qkv_projection_method == "avg" else qkv_projection_method,
221
+ )
222
+ self.convolution_projection_key = CvtSelfAttentionProjection(
223
+ embed_dim, kernel_size, padding_kv, stride_kv, projection_method=qkv_projection_method
224
+ )
225
+ self.convolution_projection_value = CvtSelfAttentionProjection(
226
+ embed_dim, kernel_size, padding_kv, stride_kv, projection_method=qkv_projection_method
227
+ )
228
+
229
+ self.projection_query = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
230
+ self.projection_key = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
231
+ self.projection_value = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
232
+
233
+ self.dropout = nn.Dropout(attention_drop_rate)
234
+
235
+ def rearrange_for_multi_head_attention(self, hidden_state):
236
+ batch_size, hidden_size, _ = hidden_state.shape
237
+ head_dim = self.embed_dim // self.num_heads
238
+ # rearrange 'b t (h d) -> b h t d'
239
+ return hidden_state.view(batch_size, hidden_size, self.num_heads, head_dim).permute(0, 2, 1, 3)
240
+
241
+ def forward(self, hidden_state, height, width):
242
+ if self.with_cls_token:
243
+ cls_token, hidden_state = torch.split(hidden_state, [1, height * width], 1)
244
+ batch_size, hidden_size, num_channels = hidden_state.shape
245
+ # rearrange "b (h w) c -> b c h w"
246
+ hidden_state = hidden_state.permute(0, 2, 1).view(batch_size, num_channels, height, width)
247
+
248
+ key = self.convolution_projection_key(hidden_state)
249
+ query = self.convolution_projection_query(hidden_state)
250
+ value = self.convolution_projection_value(hidden_state)
251
+
252
+ if self.with_cls_token:
253
+ query = torch.cat((cls_token, query), dim=1)
254
+ key = torch.cat((cls_token, key), dim=1)
255
+ value = torch.cat((cls_token, value), dim=1)
256
+
257
+ head_dim = self.embed_dim // self.num_heads
258
+
259
+ query = self.rearrange_for_multi_head_attention(self.projection_query(query))
260
+ key = self.rearrange_for_multi_head_attention(self.projection_key(key))
261
+ value = self.rearrange_for_multi_head_attention(self.projection_value(value))
262
+
263
+ attention_score = torch.einsum("bhlk,bhtk->bhlt", [query, key]) * self.scale
264
+ attention_probs = torch.nn.functional.softmax(attention_score, dim=-1)
265
+ attention_probs = self.dropout(attention_probs)
266
+
267
+ context = torch.einsum("bhlt,bhtv->bhlv", [attention_probs, value])
268
+ # rearrange"b h t d -> b t (h d)"
269
+ _, _, hidden_size, _ = context.shape
270
+ context = context.permute(0, 2, 1, 3).contiguous().view(batch_size, hidden_size, self.num_heads * head_dim)
271
+ return context
272
+
273
+
274
+ class CvtSelfOutput(nn.Module):
275
+ """
276
+ The residual connection is defined in CvtLayer instead of here (as is the case with other models), due to the
277
+ layernorm applied before each block.
278
+ """
279
+
280
+ def __init__(self, embed_dim, drop_rate):
281
+ super().__init__()
282
+ self.dense = nn.Linear(embed_dim, embed_dim)
283
+ self.dropout = nn.Dropout(drop_rate)
284
+
285
+ def forward(self, hidden_state, input_tensor):
286
+ hidden_state = self.dense(hidden_state)
287
+ hidden_state = self.dropout(hidden_state)
288
+ return hidden_state
289
+
290
+
291
+ class CvtAttention(nn.Module):
292
+ def __init__(
293
+ self,
294
+ num_heads,
295
+ embed_dim,
296
+ kernel_size,
297
+ padding_q,
298
+ padding_kv,
299
+ stride_q,
300
+ stride_kv,
301
+ qkv_projection_method,
302
+ qkv_bias,
303
+ attention_drop_rate,
304
+ drop_rate,
305
+ with_cls_token=True,
306
+ ):
307
+ super().__init__()
308
+ self.attention = CvtSelfAttention(
309
+ num_heads,
310
+ embed_dim,
311
+ kernel_size,
312
+ padding_q,
313
+ padding_kv,
314
+ stride_q,
315
+ stride_kv,
316
+ qkv_projection_method,
317
+ qkv_bias,
318
+ attention_drop_rate,
319
+ with_cls_token,
320
+ )
321
+ self.output = CvtSelfOutput(embed_dim, drop_rate)
322
+ self.pruned_heads = set()
323
+
324
+ def prune_heads(self, heads):
325
+ if len(heads) == 0:
326
+ return
327
+ heads, index = find_pruneable_heads_and_indices(
328
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
329
+ )
330
+
331
+ # Prune linear layers
332
+ self.attention.query = prune_linear_layer(self.attention.query, index)
333
+ self.attention.key = prune_linear_layer(self.attention.key, index)
334
+ self.attention.value = prune_linear_layer(self.attention.value, index)
335
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
336
+
337
+ # Update hyper params and store pruned heads
338
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
339
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
340
+ self.pruned_heads = self.pruned_heads.union(heads)
341
+
342
+ def forward(self, hidden_state, height, width):
343
+ self_output = self.attention(hidden_state, height, width)
344
+ attention_output = self.output(self_output, hidden_state)
345
+ return attention_output
346
+
347
+
348
+ class CvtIntermediate(nn.Module):
349
+ def __init__(self, embed_dim, mlp_ratio):
350
+ super().__init__()
351
+ self.dense = nn.Linear(embed_dim, int(embed_dim * mlp_ratio))
352
+ self.activation = nn.GELU()
353
+
354
+ def forward(self, hidden_state):
355
+ hidden_state = self.dense(hidden_state)
356
+ hidden_state = self.activation(hidden_state)
357
+ return hidden_state
358
+
359
+
360
+ class CvtOutput(nn.Module):
361
+ def __init__(self, embed_dim, mlp_ratio, drop_rate):
362
+ super().__init__()
363
+ self.dense = nn.Linear(int(embed_dim * mlp_ratio), embed_dim)
364
+ self.dropout = nn.Dropout(drop_rate)
365
+
366
+ def forward(self, hidden_state, input_tensor):
367
+ hidden_state = self.dense(hidden_state)
368
+ hidden_state = self.dropout(hidden_state)
369
+ hidden_state = hidden_state + input_tensor
370
+ return hidden_state
371
+
372
+
373
+ class CvtLayer(nn.Module):
374
+ """
375
+ CvtLayer, composed of attention layers, normalization and multi-layer perceptrons (MLPs).
376
+ """
377
+
378
+ def __init__(
379
+ self,
380
+ num_heads,
381
+ embed_dim,
382
+ kernel_size,
383
+ padding_q,
384
+ padding_kv,
385
+ stride_q,
386
+ stride_kv,
387
+ qkv_projection_method,
388
+ qkv_bias,
389
+ attention_drop_rate,
390
+ drop_rate,
391
+ mlp_ratio,
392
+ drop_path_rate,
393
+ with_cls_token=True,
394
+ ):
395
+ super().__init__()
396
+ self.attention = CvtAttention(
397
+ num_heads,
398
+ embed_dim,
399
+ kernel_size,
400
+ padding_q,
401
+ padding_kv,
402
+ stride_q,
403
+ stride_kv,
404
+ qkv_projection_method,
405
+ qkv_bias,
406
+ attention_drop_rate,
407
+ drop_rate,
408
+ with_cls_token,
409
+ )
410
+
411
+ self.intermediate = CvtIntermediate(embed_dim, mlp_ratio)
412
+ self.output = CvtOutput(embed_dim, mlp_ratio, drop_rate)
413
+ self.drop_path = CvtDropPath(drop_prob=drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
414
+ self.layernorm_before = nn.LayerNorm(embed_dim)
415
+ self.layernorm_after = nn.LayerNorm(embed_dim)
416
+
417
+ def forward(self, hidden_state, height, width):
418
+ self_attention_output = self.attention(
419
+ self.layernorm_before(hidden_state), # in Cvt, layernorm is applied before self-attention
420
+ height,
421
+ width,
422
+ )
423
+ attention_output = self_attention_output
424
+ attention_output = self.drop_path(attention_output)
425
+
426
+ # first residual connection
427
+ hidden_state = attention_output + hidden_state
428
+
429
+ # in Cvt, layernorm is also applied after self-attention
430
+ layer_output = self.layernorm_after(hidden_state)
431
+ layer_output = self.intermediate(layer_output)
432
+
433
+ # second residual connection is done here
434
+ layer_output = self.output(layer_output, hidden_state)
435
+ layer_output = self.drop_path(layer_output)
436
+ return layer_output
437
+
438
+
439
+ class CvtStage(nn.Module):
440
+ def __init__(self, config, stage):
441
+ super().__init__()
442
+ self.config = config
443
+ self.stage = stage
444
+ if self.config.cls_token[self.stage]:
445
+ self.cls_token = nn.Parameter(torch.randn(1, 1, self.config.embed_dim[-1]))
446
+
447
+ self.embedding = CvtEmbeddings(
448
+ patch_size=config.patch_sizes[self.stage],
449
+ stride=config.patch_stride[self.stage],
450
+ num_channels=config.num_channels if self.stage == 0 else config.embed_dim[self.stage - 1],
451
+ embed_dim=config.embed_dim[self.stage],
452
+ padding=config.patch_padding[self.stage],
453
+ dropout_rate=config.drop_rate[self.stage],
454
+ )
455
+
456
+ drop_path_rates = [x.item() for x in torch.linspace(0, config.drop_path_rate[self.stage], config.depth[stage])]
457
+
458
+ self.layers = nn.Sequential(
459
+ *[
460
+ CvtLayer(
461
+ num_heads=config.num_heads[self.stage],
462
+ embed_dim=config.embed_dim[self.stage],
463
+ kernel_size=config.kernel_qkv[self.stage],
464
+ padding_q=config.padding_q[self.stage],
465
+ padding_kv=config.padding_kv[self.stage],
466
+ stride_kv=config.stride_kv[self.stage],
467
+ stride_q=config.stride_q[self.stage],
468
+ qkv_projection_method=config.qkv_projection_method[self.stage],
469
+ qkv_bias=config.qkv_bias[self.stage],
470
+ attention_drop_rate=config.attention_drop_rate[self.stage],
471
+ drop_rate=config.drop_rate[self.stage],
472
+ drop_path_rate=drop_path_rates[self.stage],
473
+ mlp_ratio=config.mlp_ratio[self.stage],
474
+ with_cls_token=config.cls_token[self.stage],
475
+ )
476
+ for _ in range(config.depth[self.stage])
477
+ ]
478
+ )
479
+
480
+ def forward(self, hidden_state):
481
+ cls_token = None
482
+ hidden_state = self.embedding(hidden_state)
483
+ batch_size, num_channels, height, width = hidden_state.shape
484
+ # rearrange "b c h w -> b (h w) c"
485
+ hidden_state = hidden_state.view(batch_size, num_channels, height * width).permute(0, 2, 1)
486
+ if self.config.cls_token[self.stage]:
487
+ cls_token = self.cls_token.expand(batch_size, -1, -1)
488
+ hidden_state = torch.cat((cls_token, hidden_state), dim=1)
489
+
490
+ for layer in self.layers:
491
+ layer_outputs = layer(hidden_state, height, width)
492
+ hidden_state = layer_outputs
493
+
494
+ if self.config.cls_token[self.stage]:
495
+ cls_token, hidden_state = torch.split(hidden_state, [1, height * width], 1)
496
+ hidden_state = hidden_state.permute(0, 2, 1).view(batch_size, num_channels, height, width)
497
+ return hidden_state, cls_token
498
+
499
+
500
+ class CvtEncoder(nn.Module):
501
+ def __init__(self, config):
502
+ super().__init__()
503
+ self.config = config
504
+ self.stages = nn.ModuleList([])
505
+ for stage_idx in range(len(config.depth)):
506
+ self.stages.append(CvtStage(config, stage_idx))
507
+
508
+ def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
509
+ all_hidden_states = () if output_hidden_states else None
510
+ hidden_state = pixel_values
511
+
512
+ cls_token = None
513
+ for _, (stage_module) in enumerate(self.stages):
514
+ hidden_state, cls_token = stage_module(hidden_state)
515
+ if output_hidden_states:
516
+ all_hidden_states = all_hidden_states + (hidden_state,)
517
+
518
+ if not return_dict:
519
+ return tuple(v for v in [hidden_state, cls_token, all_hidden_states] if v is not None)
520
+
521
+ return BaseModelOutputWithCLSToken(
522
+ last_hidden_state=hidden_state,
523
+ cls_token_value=cls_token,
524
+ hidden_states=all_hidden_states,
525
+ )
526
+
527
+
528
+ class CvtPreTrainedModel(PreTrainedModel):
529
+ """
530
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
531
+ models.
532
+ """
533
+
534
+ config_class = CvtConfig
535
+ base_model_prefix = "cvt"
536
+ main_input_name = "pixel_values"
537
+
538
+ def _init_weights(self, module):
539
+ """Initialize the weights"""
540
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
541
+ module.weight.data = nn.init.trunc_normal_(module.weight.data, mean=0.0, std=self.config.initializer_range)
542
+ if module.bias is not None:
543
+ module.bias.data.zero_()
544
+ elif isinstance(module, nn.LayerNorm):
545
+ module.bias.data.zero_()
546
+ module.weight.data.fill_(1.0)
547
+ elif isinstance(module, CvtStage):
548
+ if self.config.cls_token[module.stage]:
549
+ module.cls_token.data = nn.init.trunc_normal_(
550
+ torch.zeros(1, 1, self.config.embed_dim[-1]), mean=0.0, std=self.config.initializer_range
551
+ )
552
+
553
+
554
+ CVT_START_DOCSTRING = r"""
555
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
556
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
557
+ behavior.
558
+
559
+ Parameters:
560
+ config ([`CvtConfig`]): Model configuration class with all the parameters of the model.
561
+ Initializing with a config file does not load the weights associated with the model, only the
562
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
563
+ """
564
+
565
+ CVT_INPUTS_DOCSTRING = r"""
566
+ Args:
567
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
568
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CvtImageProcessor.__call__`]
569
+ for details.
570
+ output_hidden_states (`bool`, *optional*):
571
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
572
+ more detail.
573
+ return_dict (`bool`, *optional*):
574
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
575
+ """
576
+
577
+
578
+ @add_start_docstrings(
579
+ "The bare Cvt Model transformer outputting raw hidden-states without any specific head on top.",
580
+ CVT_START_DOCSTRING,
581
+ )
582
+ class CvtModel(CvtPreTrainedModel):
583
+ def __init__(self, config, add_pooling_layer=True):
584
+ super().__init__(config)
585
+ self.config = config
586
+ self.encoder = CvtEncoder(config)
587
+ self.post_init()
588
+
589
+ def _prune_heads(self, heads_to_prune):
590
+ """
591
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
592
+ class PreTrainedModel
593
+ """
594
+ for layer, heads in heads_to_prune.items():
595
+ self.encoder.layer[layer].attention.prune_heads(heads)
596
+
597
+ @add_start_docstrings_to_model_forward(CVT_INPUTS_DOCSTRING)
598
+ @add_code_sample_docstrings(
599
+ checkpoint=_CHECKPOINT_FOR_DOC,
600
+ output_type=BaseModelOutputWithCLSToken,
601
+ config_class=_CONFIG_FOR_DOC,
602
+ modality="vision",
603
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
604
+ )
605
+ def forward(
606
+ self,
607
+ pixel_values: Optional[torch.Tensor] = None,
608
+ output_hidden_states: Optional[bool] = None,
609
+ return_dict: Optional[bool] = None,
610
+ ) -> Union[Tuple, BaseModelOutputWithCLSToken]:
611
+ output_hidden_states = (
612
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
613
+ )
614
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
615
+
616
+ if pixel_values is None:
617
+ raise ValueError("You have to specify pixel_values")
618
+
619
+ encoder_outputs = self.encoder(
620
+ pixel_values,
621
+ output_hidden_states=output_hidden_states,
622
+ return_dict=return_dict,
623
+ )
624
+ sequence_output = encoder_outputs[0]
625
+
626
+ if not return_dict:
627
+ return (sequence_output,) + encoder_outputs[1:]
628
+
629
+ return BaseModelOutputWithCLSToken(
630
+ last_hidden_state=sequence_output,
631
+ cls_token_value=encoder_outputs.cls_token_value,
632
+ hidden_states=encoder_outputs.hidden_states,
633
+ )
634
+
635
+
636
+ @add_start_docstrings(
637
+ """
638
+ Cvt Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
639
+ the [CLS] token) e.g. for ImageNet.
640
+ """,
641
+ CVT_START_DOCSTRING,
642
+ )
643
+ class CvtForImageClassification(CvtPreTrainedModel):
644
+ def __init__(self, config):
645
+ super().__init__(config)
646
+
647
+ self.num_labels = config.num_labels
648
+ self.cvt = CvtModel(config, add_pooling_layer=False)
649
+ self.layernorm = nn.LayerNorm(config.embed_dim[-1])
650
+ # Classifier head
651
+ self.classifier = (
652
+ nn.Linear(config.embed_dim[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
653
+ )
654
+
655
+ # Initialize weights and apply final processing
656
+ self.post_init()
657
+
658
+ @add_start_docstrings_to_model_forward(CVT_INPUTS_DOCSTRING)
659
+ @add_code_sample_docstrings(
660
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
661
+ output_type=ImageClassifierOutputWithNoAttention,
662
+ config_class=_CONFIG_FOR_DOC,
663
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
664
+ )
665
+ def forward(
666
+ self,
667
+ pixel_values: Optional[torch.Tensor] = None,
668
+ labels: Optional[torch.Tensor] = None,
669
+ output_hidden_states: Optional[bool] = None,
670
+ return_dict: Optional[bool] = None,
671
+ ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
672
+ r"""
673
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
674
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
675
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
676
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
677
+ """
678
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
679
+ outputs = self.cvt(
680
+ pixel_values,
681
+ output_hidden_states=output_hidden_states,
682
+ return_dict=return_dict,
683
+ )
684
+
685
+ sequence_output = outputs[0]
686
+ cls_token = outputs[1]
687
+ if self.config.cls_token[-1]:
688
+ sequence_output = self.layernorm(cls_token)
689
+ else:
690
+ batch_size, num_channels, height, width = sequence_output.shape
691
+ # rearrange "b c h w -> b (h w) c"
692
+ sequence_output = sequence_output.view(batch_size, num_channels, height * width).permute(0, 2, 1)
693
+ sequence_output = self.layernorm(sequence_output)
694
+
695
+ sequence_output_mean = sequence_output.mean(dim=1)
696
+ logits = self.classifier(sequence_output_mean)
697
+
698
+ loss = None
699
+ if labels is not None:
700
+ if self.config.problem_type is None:
701
+ if self.config.num_labels == 1:
702
+ self.config.problem_type = "regression"
703
+ elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
704
+ self.config.problem_type = "single_label_classification"
705
+ else:
706
+ self.config.problem_type = "multi_label_classification"
707
+
708
+ if self.config.problem_type == "regression":
709
+ loss_fct = MSELoss()
710
+ if self.config.num_labels == 1:
711
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
712
+ else:
713
+ loss = loss_fct(logits, labels)
714
+ elif self.config.problem_type == "single_label_classification":
715
+ loss_fct = CrossEntropyLoss()
716
+ loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
717
+ elif self.config.problem_type == "multi_label_classification":
718
+ loss_fct = BCEWithLogitsLoss()
719
+ loss = loss_fct(logits, labels)
720
+
721
+ if not return_dict:
722
+ output = (logits,) + outputs[2:]
723
+ return ((loss,) + output) if loss is not None else output
724
+
725
+ return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
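As a usage illustration for the PyTorch classes above, a minimal inference sketch follows. It assumes the microsoft/cvt-13 checkpoint named in the docstring constants is reachable, and the image URL is only an example input; AutoImageProcessor resolves the matching preprocessor.

import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, CvtForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # example image
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
model = CvtForImageClassification.from_pretrained("microsoft/cvt-13")

inputs = processor(images=image, return_tensors="pt")  # pixel_values: (1, 3, 224, 224)
with torch.no_grad():
    logits = model(**inputs).logits  # (1, num_labels), e.g. (1, 1000) for ImageNet-1k
print(model.config.id2label[logits.argmax(-1).item()])  # expected: "tabby, tabby cat"

The bare CvtModel can be called the same way; per _EXPECTED_OUTPUT_SHAPE, its last_hidden_state for this checkpoint has shape (1, 384, 14, 14).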
llmeval-env/lib/python3.10/site-packages/transformers/models/cvt/modeling_tf_cvt.py ADDED
@@ -0,0 +1,1097 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 Cvt model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import collections.abc
21
+ from dataclasses import dataclass
22
+ from typing import Optional, Tuple, Union
23
+
24
+ import tensorflow as tf
25
+
26
+ from ...modeling_tf_outputs import TFImageClassifierOutputWithNoAttention
27
+ from ...modeling_tf_utils import (
28
+ TFModelInputType,
29
+ TFPreTrainedModel,
30
+ TFSequenceClassificationLoss,
31
+ get_initializer,
32
+ keras,
33
+ keras_serializable,
34
+ unpack_inputs,
35
+ )
36
+ from ...tf_utils import shape_list, stable_softmax
37
+ from ...utils import (
38
+ ModelOutput,
39
+ add_start_docstrings,
40
+ add_start_docstrings_to_model_forward,
41
+ logging,
42
+ replace_return_docstrings,
43
+ )
44
+ from .configuration_cvt import CvtConfig
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ # General docstring
50
+ _CONFIG_FOR_DOC = "CvtConfig"
51
+
52
+
53
+ from ..deprecated._archive_maps import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
54
+
55
+
56
+ @dataclass
57
+ class TFBaseModelOutputWithCLSToken(ModelOutput):
58
+ """
59
+ Base class for model's outputs.
60
+
61
+ Args:
62
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
63
+ Sequence of hidden-states at the output of the last layer of the model.
64
+ cls_token_value (`tf.Tensor` of shape `(batch_size, 1, hidden_size)`):
65
+ Classification token at the output of the last layer of the model.
66
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
67
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
68
+ `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus
69
+ the initial embedding outputs.
70
+ """
71
+
72
+ last_hidden_state: tf.Tensor = None
73
+ cls_token_value: tf.Tensor = None
74
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
75
+
76
+
77
+ class TFCvtDropPath(keras.layers.Layer):
78
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
79
+ References:
80
+ (1) github.com:rwightman/pytorch-image-models
81
+ """
82
+
83
+ def __init__(self, drop_prob: float, **kwargs):
84
+ super().__init__(**kwargs)
85
+ self.drop_prob = drop_prob
86
+
87
+ def call(self, x: tf.Tensor, training=None):
88
+ if self.drop_prob == 0.0 or not training:
89
+ return x
90
+ keep_prob = 1 - self.drop_prob
91
+ shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
92
+ random_tensor = keep_prob + tf.random.uniform(shape, 0, 1, dtype=self.compute_dtype)
93
+ random_tensor = tf.floor(random_tensor)
94
+ return (x / keep_prob) * random_tensor
95
+
96
+
97
+ class TFCvtEmbeddings(keras.layers.Layer):
98
+ """Construct the Convolutional Token Embeddings."""
99
+
100
+ def __init__(
101
+ self,
102
+ config: CvtConfig,
103
+ patch_size: int,
104
+ num_channels: int,
105
+ embed_dim: int,
106
+ stride: int,
107
+ padding: int,
108
+ dropout_rate: float,
109
+ **kwargs,
110
+ ):
111
+ super().__init__(**kwargs)
112
+ self.convolution_embeddings = TFCvtConvEmbeddings(
113
+ config,
114
+ patch_size=patch_size,
115
+ num_channels=num_channels,
116
+ embed_dim=embed_dim,
117
+ stride=stride,
118
+ padding=padding,
119
+ name="convolution_embeddings",
120
+ )
121
+ self.dropout = keras.layers.Dropout(dropout_rate)
122
+
123
+ def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor:
124
+ hidden_state = self.convolution_embeddings(pixel_values)
125
+ hidden_state = self.dropout(hidden_state, training=training)
126
+ return hidden_state
127
+
128
+ def build(self, input_shape=None):
129
+ if self.built:
130
+ return
131
+ self.built = True
132
+ if getattr(self, "convolution_embeddings", None) is not None:
133
+ with tf.name_scope(self.convolution_embeddings.name):
134
+ self.convolution_embeddings.build(None)
135
+
136
+
137
+ class TFCvtConvEmbeddings(keras.layers.Layer):
138
+ """Image to Convolution Embeddings. This convolutional operation aims to model local spatial contexts."""
139
+
140
+ def __init__(
141
+ self,
142
+ config: CvtConfig,
143
+ patch_size: int,
144
+ num_channels: int,
145
+ embed_dim: int,
146
+ stride: int,
147
+ padding: int,
148
+ **kwargs,
149
+ ):
150
+ super().__init__(**kwargs)
151
+ self.padding = keras.layers.ZeroPadding2D(padding=padding)
152
+ self.patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
153
+ self.projection = keras.layers.Conv2D(
154
+ filters=embed_dim,
155
+ kernel_size=patch_size,
156
+ strides=stride,
157
+ padding="valid",
158
+ data_format="channels_last",
159
+ kernel_initializer=get_initializer(config.initializer_range),
160
+ name="projection",
161
+ )
162
+ # Using the same default epsilon as PyTorch
163
+ self.normalization = keras.layers.LayerNormalization(epsilon=1e-5, name="normalization")
164
+ self.num_channels = num_channels
165
+ self.embed_dim = embed_dim
166
+
167
+ def call(self, pixel_values: tf.Tensor) -> tf.Tensor:
168
+ if isinstance(pixel_values, dict):
169
+ pixel_values = pixel_values["pixel_values"]
170
+
171
+ pixel_values = self.projection(self.padding(pixel_values))
172
+
173
+ # "batch_size, height, width, num_channels -> batch_size, (height*width), num_channels"
174
+ batch_size, height, width, num_channels = shape_list(pixel_values)
175
+ hidden_size = height * width
176
+ pixel_values = tf.reshape(pixel_values, shape=(batch_size, hidden_size, num_channels))
177
+ pixel_values = self.normalization(pixel_values)
178
+
179
+ # "batch_size, (height*width), num_channels -> batch_size, height, width, num_channels"
180
+ pixel_values = tf.reshape(pixel_values, shape=(batch_size, height, width, num_channels))
181
+ return pixel_values
182
+
183
+ def build(self, input_shape=None):
184
+ if self.built:
185
+ return
186
+ self.built = True
187
+ if getattr(self, "projection", None) is not None:
188
+ with tf.name_scope(self.projection.name):
189
+ self.projection.build([None, None, None, self.num_channels])
190
+ if getattr(self, "normalization", None) is not None:
191
+ with tf.name_scope(self.normalization.name):
192
+ self.normalization.build([None, None, self.embed_dim])
193
+
194
+
195
+ class TFCvtSelfAttentionConvProjection(keras.layers.Layer):
196
+ """Convolutional projection layer."""
197
+
198
+ def __init__(self, config: CvtConfig, embed_dim: int, kernel_size: int, stride: int, padding: int, **kwargs):
199
+ super().__init__(**kwargs)
200
+ self.padding = keras.layers.ZeroPadding2D(padding=padding)
201
+ self.convolution = keras.layers.Conv2D(
202
+ filters=embed_dim,
203
+ kernel_size=kernel_size,
204
+ kernel_initializer=get_initializer(config.initializer_range),
205
+ padding="valid",
206
+ strides=stride,
207
+ use_bias=False,
208
+ name="convolution",
209
+ groups=embed_dim,
210
+ )
211
+ # Using the same default epsilon as PyTorch, TF uses (1 - pytorch momentum)
212
+ self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
213
+ self.embed_dim = embed_dim
214
+
215
+ def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
216
+ hidden_state = self.convolution(self.padding(hidden_state))
217
+ hidden_state = self.normalization(hidden_state, training=training)
218
+ return hidden_state
219
+
220
+ def build(self, input_shape=None):
221
+ if self.built:
222
+ return
223
+ self.built = True
224
+ if getattr(self, "convolution", None) is not None:
225
+ with tf.name_scope(self.convolution.name):
226
+ self.convolution.build([None, None, None, self.embed_dim])
227
+ if getattr(self, "normalization", None) is not None:
228
+ with tf.name_scope(self.normalization.name):
229
+ self.normalization.build([None, None, None, self.embed_dim])
230
+
231
+
232
+ class TFCvtSelfAttentionLinearProjection(keras.layers.Layer):
233
+ """Linear projection layer used to flatten tokens into 1D."""
234
+
235
+ def call(self, hidden_state: tf.Tensor) -> tf.Tensor:
236
+ # "batch_size, height, width, num_channels -> batch_size, (height*width), num_channels"
237
+ batch_size, height, width, num_channels = shape_list(hidden_state)
238
+ hidden_size = height * width
239
+ hidden_state = tf.reshape(hidden_state, shape=(batch_size, hidden_size, num_channels))
240
+ return hidden_state
241
+
242
+
243
+ class TFCvtSelfAttentionProjection(keras.layers.Layer):
244
+ """Convolutional Projection for Attention."""
245
+
246
+ def __init__(
247
+ self,
248
+ config: CvtConfig,
249
+ embed_dim: int,
250
+ kernel_size: int,
251
+ stride: int,
252
+ padding: int,
253
+ projection_method: str = "dw_bn",
254
+ **kwargs,
255
+ ):
256
+ super().__init__(**kwargs)
257
+ if projection_method == "dw_bn":
258
+ self.convolution_projection = TFCvtSelfAttentionConvProjection(
259
+ config, embed_dim, kernel_size, stride, padding, name="convolution_projection"
260
+ )
261
+ self.linear_projection = TFCvtSelfAttentionLinearProjection()
262
+
263
+ def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
264
+ hidden_state = self.convolution_projection(hidden_state, training=training)
265
+ hidden_state = self.linear_projection(hidden_state)
266
+ return hidden_state
267
+
268
+ def build(self, input_shape=None):
269
+ if self.built:
270
+ return
271
+ self.built = True
272
+ if getattr(self, "convolution_projection", None) is not None:
273
+ with tf.name_scope(self.convolution_projection.name):
274
+ self.convolution_projection.build(None)
275
+
276
+
277
+ class TFCvtSelfAttention(keras.layers.Layer):
278
+ """
279
+ Self-attention layer. A depth-wise separable convolution operation (Convolutional Projection) is applied for
280
+ query, key, and value embeddings.
281
+ """
282
+
283
+ def __init__(
284
+ self,
285
+ config: CvtConfig,
286
+ num_heads: int,
287
+ embed_dim: int,
288
+ kernel_size: int,
289
+ stride_q: int,
290
+ stride_kv: int,
291
+ padding_q: int,
292
+ padding_kv: int,
293
+ qkv_projection_method: str,
294
+ qkv_bias: bool,
295
+ attention_drop_rate: float,
296
+ with_cls_token: bool = True,
297
+ **kwargs,
298
+ ):
299
+ super().__init__(**kwargs)
300
+ self.scale = embed_dim**-0.5
301
+ self.with_cls_token = with_cls_token
302
+ self.embed_dim = embed_dim
303
+ self.num_heads = num_heads
304
+
305
+ self.convolution_projection_query = TFCvtSelfAttentionProjection(
306
+ config,
307
+ embed_dim,
308
+ kernel_size,
309
+ stride_q,
310
+ padding_q,
311
+ projection_method="linear" if qkv_projection_method == "avg" else qkv_projection_method,
312
+ name="convolution_projection_query",
313
+ )
314
+ self.convolution_projection_key = TFCvtSelfAttentionProjection(
315
+ config,
316
+ embed_dim,
317
+ kernel_size,
318
+ stride_kv,
319
+ padding_kv,
320
+ projection_method=qkv_projection_method,
321
+ name="convolution_projection_key",
322
+ )
323
+ self.convolution_projection_value = TFCvtSelfAttentionProjection(
324
+ config,
325
+ embed_dim,
326
+ kernel_size,
327
+ stride_kv,
328
+ padding_kv,
329
+ projection_method=qkv_projection_method,
330
+ name="convolution_projection_value",
331
+ )
332
+
333
+ self.projection_query = keras.layers.Dense(
334
+ units=embed_dim,
335
+ kernel_initializer=get_initializer(config.initializer_range),
336
+ use_bias=qkv_bias,
337
+ bias_initializer="zeros",
338
+ name="projection_query",
339
+ )
340
+ self.projection_key = keras.layers.Dense(
341
+ units=embed_dim,
342
+ kernel_initializer=get_initializer(config.initializer_range),
343
+ use_bias=qkv_bias,
344
+ bias_initializer="zeros",
345
+ name="projection_key",
346
+ )
347
+ self.projection_value = keras.layers.Dense(
348
+ units=embed_dim,
349
+ kernel_initializer=get_initializer(config.initializer_range),
350
+ use_bias=qkv_bias,
351
+ bias_initializer="zeros",
352
+ name="projection_value",
353
+ )
354
+ self.dropout = keras.layers.Dropout(attention_drop_rate)
355
+
356
+ def rearrange_for_multi_head_attention(self, hidden_state: tf.Tensor) -> tf.Tensor:
357
+ batch_size, hidden_size, _ = shape_list(hidden_state)
358
+ head_dim = self.embed_dim // self.num_heads
359
+ hidden_state = tf.reshape(hidden_state, shape=(batch_size, hidden_size, self.num_heads, head_dim))
360
+ hidden_state = tf.transpose(hidden_state, perm=(0, 2, 1, 3))
361
+ return hidden_state
362
+
363
+ def call(self, hidden_state: tf.Tensor, height: int, width: int, training: bool = False) -> tf.Tensor:
364
+ if self.with_cls_token:
365
+ cls_token, hidden_state = tf.split(hidden_state, [1, height * width], 1)
366
+
367
+ # "batch_size, (height*width), num_channels -> batch_size, height, width, num_channels"
368
+ batch_size, hidden_size, num_channels = shape_list(hidden_state)
369
+ hidden_state = tf.reshape(hidden_state, shape=(batch_size, height, width, num_channels))
370
+
371
+ key = self.convolution_projection_key(hidden_state, training=training)
372
+ query = self.convolution_projection_query(hidden_state, training=training)
373
+ value = self.convolution_projection_value(hidden_state, training=training)
374
+
375
+ if self.with_cls_token:
376
+ query = tf.concat((cls_token, query), axis=1)
377
+ key = tf.concat((cls_token, key), axis=1)
378
+ value = tf.concat((cls_token, value), axis=1)
379
+
380
+ head_dim = self.embed_dim // self.num_heads
381
+
382
+ query = self.rearrange_for_multi_head_attention(self.projection_query(query))
383
+ key = self.rearrange_for_multi_head_attention(self.projection_key(key))
384
+ value = self.rearrange_for_multi_head_attention(self.projection_value(value))
385
+
386
+ attention_score = tf.matmul(query, key, transpose_b=True) * self.scale
387
+ attention_probs = stable_softmax(logits=attention_score, axis=-1)
388
+ attention_probs = self.dropout(attention_probs, training=training)
389
+
390
+ context = tf.matmul(attention_probs, value)
391
+ # "batch_size, num_heads, hidden_size, head_dim -> batch_size, hidden_size, (num_heads*head_dim)"
392
+ _, _, hidden_size, _ = shape_list(context)
393
+ context = tf.transpose(context, perm=(0, 2, 1, 3))
394
+ context = tf.reshape(context, (batch_size, hidden_size, self.num_heads * head_dim))
395
+ return context
396
+
397
+ def build(self, input_shape=None):
398
+ if self.built:
399
+ return
400
+ self.built = True
401
+ if getattr(self, "convolution_projection_query", None) is not None:
402
+ with tf.name_scope(self.convolution_projection_query.name):
403
+ self.convolution_projection_query.build(None)
404
+ if getattr(self, "convolution_projection_key", None) is not None:
405
+ with tf.name_scope(self.convolution_projection_key.name):
406
+ self.convolution_projection_key.build(None)
407
+ if getattr(self, "convolution_projection_value", None) is not None:
408
+ with tf.name_scope(self.convolution_projection_value.name):
409
+ self.convolution_projection_value.build(None)
410
+ if getattr(self, "projection_query", None) is not None:
411
+ with tf.name_scope(self.projection_query.name):
412
+ self.projection_query.build([None, None, self.embed_dim])
413
+ if getattr(self, "projection_key", None) is not None:
414
+ with tf.name_scope(self.projection_key.name):
415
+ self.projection_key.build([None, None, self.embed_dim])
416
+ if getattr(self, "projection_value", None) is not None:
417
+ with tf.name_scope(self.projection_value.name):
418
+ self.projection_value.build([None, None, self.embed_dim])
419
+
420
+
421
+ class TFCvtSelfOutput(keras.layers.Layer):
422
+ """Output of the Attention layer ."""
423
+
424
+ def __init__(self, config: CvtConfig, embed_dim: int, drop_rate: float, **kwargs):
425
+ super().__init__(**kwargs)
426
+ self.dense = keras.layers.Dense(
427
+ units=embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="dense"
428
+ )
429
+ self.dropout = keras.layers.Dropout(drop_rate)
430
+ self.embed_dim = embed_dim
431
+
432
+ def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
433
+ hidden_state = self.dense(inputs=hidden_state)
434
+ hidden_state = self.dropout(inputs=hidden_state, training=training)
435
+ return hidden_state
436
+
437
+ def build(self, input_shape=None):
438
+ if self.built:
439
+ return
440
+ self.built = True
441
+ if getattr(self, "dense", None) is not None:
442
+ with tf.name_scope(self.dense.name):
443
+ self.dense.build([None, None, self.embed_dim])
444
+
445
+
446
+ class TFCvtAttention(keras.layers.Layer):
447
+ """Attention layer. First chunk of the convolutional transformer block."""
448
+
449
+ def __init__(
450
+ self,
451
+ config: CvtConfig,
452
+ num_heads: int,
453
+ embed_dim: int,
454
+ kernel_size: int,
455
+ stride_q: int,
456
+ stride_kv: int,
457
+ padding_q: int,
458
+ padding_kv: int,
459
+ qkv_projection_method: str,
460
+ qkv_bias: bool,
461
+ attention_drop_rate: float,
462
+ drop_rate: float,
463
+ with_cls_token: bool = True,
464
+ **kwargs,
465
+ ):
466
+ super().__init__(**kwargs)
467
+ self.attention = TFCvtSelfAttention(
468
+ config,
469
+ num_heads,
470
+ embed_dim,
471
+ kernel_size,
472
+ stride_q,
473
+ stride_kv,
474
+ padding_q,
475
+ padding_kv,
476
+ qkv_projection_method,
477
+ qkv_bias,
478
+ attention_drop_rate,
479
+ with_cls_token,
480
+ name="attention",
481
+ )
482
+ self.dense_output = TFCvtSelfOutput(config, embed_dim, drop_rate, name="output")
483
+
484
+ def prune_heads(self, heads):
485
+ raise NotImplementedError
486
+
487
+ def call(self, hidden_state: tf.Tensor, height: int, width: int, training: bool = False):
488
+ self_output = self.attention(hidden_state, height, width, training=training)
489
+ attention_output = self.dense_output(self_output, training=training)
490
+ return attention_output
491
+
492
+ def build(self, input_shape=None):
493
+ if self.built:
494
+ return
495
+ self.built = True
496
+ if getattr(self, "attention", None) is not None:
497
+ with tf.name_scope(self.attention.name):
498
+ self.attention.build(None)
499
+ if getattr(self, "dense_output", None) is not None:
500
+ with tf.name_scope(self.dense_output.name):
501
+ self.dense_output.build(None)
502
+
503
+
504
+ class TFCvtIntermediate(keras.layers.Layer):
505
+ """Intermediate dense layer. Second chunk of the convolutional transformer block."""
506
+
507
+ def __init__(self, config: CvtConfig, embed_dim: int, mlp_ratio: int, **kwargs):
508
+ super().__init__(**kwargs)
509
+ self.dense = keras.layers.Dense(
510
+ units=int(embed_dim * mlp_ratio),
511
+ kernel_initializer=get_initializer(config.initializer_range),
512
+ activation="gelu",
513
+ name="dense",
514
+ )
515
+ self.embed_dim = embed_dim
516
+
517
+ def call(self, hidden_state: tf.Tensor) -> tf.Tensor:
518
+ hidden_state = self.dense(hidden_state)
519
+ return hidden_state
520
+
521
+ def build(self, input_shape=None):
522
+ if self.built:
523
+ return
524
+ self.built = True
525
+ if getattr(self, "dense", None) is not None:
526
+ with tf.name_scope(self.dense.name):
527
+ self.dense.build([None, None, self.embed_dim])
528
+
529
+
530
+ class TFCvtOutput(keras.layers.Layer):
531
+ """
532
+ Output of the Convolutional Transformer Block (last chunk). It consists of a MLP and a residual connection.
533
+ """
534
+
535
+ def __init__(self, config: CvtConfig, embed_dim: int, mlp_ratio: int, drop_rate: int, **kwargs):
536
+ super().__init__(**kwargs)
537
+ self.dense = keras.layers.Dense(
538
+ units=embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="dense"
539
+ )
540
+ self.dropout = keras.layers.Dropout(drop_rate)
541
+ self.embed_dim = embed_dim
542
+ self.mlp_ratio = mlp_ratio
543
+
544
+ def call(self, hidden_state: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
545
+ hidden_state = self.dense(inputs=hidden_state)
546
+ hidden_state = self.dropout(inputs=hidden_state, training=training)
547
+ hidden_state = hidden_state + input_tensor
548
+ return hidden_state
549
+
550
+ def build(self, input_shape=None):
551
+ if self.built:
552
+ return
553
+ self.built = True
554
+ if getattr(self, "dense", None) is not None:
555
+ with tf.name_scope(self.dense.name):
556
+ self.dense.build([None, None, int(self.embed_dim * self.mlp_ratio)])
557
+
558
+
559
+ class TFCvtLayer(keras.layers.Layer):
560
+ """
561
+ Convolutional Transformer Block, composed of attention layers, normalization and multi-layer perceptrons (MLPs). It
562
+ consists of 3 chunks: an attention layer, an intermediate dense layer and an output layer. This corresponds to the
563
+ `Block` class in the original implementation.
564
+ """
565
+
566
+ def __init__(
567
+ self,
568
+ config: CvtConfig,
569
+ num_heads: int,
570
+ embed_dim: int,
571
+ kernel_size: int,
572
+ stride_q: int,
573
+ stride_kv: int,
574
+ padding_q: int,
575
+ padding_kv: int,
576
+ qkv_projection_method: str,
577
+ qkv_bias: bool,
578
+ attention_drop_rate: float,
579
+ drop_rate: float,
580
+ mlp_ratio: float,
581
+ drop_path_rate: float,
582
+ with_cls_token: bool = True,
583
+ **kwargs,
584
+ ):
585
+ super().__init__(**kwargs)
586
+ self.attention = TFCvtAttention(
587
+ config,
588
+ num_heads,
589
+ embed_dim,
590
+ kernel_size,
591
+ stride_q,
592
+ stride_kv,
593
+ padding_q,
594
+ padding_kv,
595
+ qkv_projection_method,
596
+ qkv_bias,
597
+ attention_drop_rate,
598
+ drop_rate,
599
+ with_cls_token,
600
+ name="attention",
601
+ )
602
+ self.intermediate = TFCvtIntermediate(config, embed_dim, mlp_ratio, name="intermediate")
603
+ self.dense_output = TFCvtOutput(config, embed_dim, mlp_ratio, drop_rate, name="output")
604
+ # Using `layers.Activation` instead of `tf.identity` to better control `training` behaviour.
605
+ self.drop_path = (
606
+ TFCvtDropPath(drop_path_rate, name="drop_path")
607
+ if drop_path_rate > 0.0
608
+ else keras.layers.Activation("linear", name="drop_path")
609
+ )
610
+ # Using the same default epsilon as PyTorch
611
+ self.layernorm_before = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_before")
612
+ self.layernorm_after = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_after")
613
+ self.embed_dim = embed_dim
614
+
615
+ def call(self, hidden_state: tf.Tensor, height: int, width: int, training: bool = False) -> tf.Tensor:
616
+ # in Cvt, layernorm is applied before self-attention
617
+ attention_output = self.attention(self.layernorm_before(hidden_state), height, width, training=training)
618
+ attention_output = self.drop_path(attention_output, training=training)
619
+
620
+ # first residual connection
621
+ hidden_state = attention_output + hidden_state
622
+
623
+ # in Cvt, layernorm is also applied after self-attention
624
+ layer_output = self.layernorm_after(hidden_state)
625
+ layer_output = self.intermediate(layer_output)
626
+
627
+ # second residual connection is done here
628
+ layer_output = self.dense_output(layer_output, hidden_state)
629
+ layer_output = self.drop_path(layer_output, training=training)
630
+ return layer_output
631
+
632
+ def build(self, input_shape=None):
633
+ if self.built:
634
+ return
635
+ self.built = True
636
+ if getattr(self, "attention", None) is not None:
637
+ with tf.name_scope(self.attention.name):
638
+ self.attention.build(None)
639
+ if getattr(self, "intermediate", None) is not None:
640
+ with tf.name_scope(self.intermediate.name):
641
+ self.intermediate.build(None)
642
+ if getattr(self, "dense_output", None) is not None:
643
+ with tf.name_scope(self.dense_output.name):
644
+ self.dense_output.build(None)
645
+ if getattr(self, "drop_path", None) is not None:
646
+ with tf.name_scope(self.drop_path.name):
647
+ self.drop_path.build(None)
648
+ if getattr(self, "layernorm_before", None) is not None:
649
+ with tf.name_scope(self.layernorm_before.name):
650
+ self.layernorm_before.build([None, None, self.embed_dim])
651
+ if getattr(self, "layernorm_after", None) is not None:
652
+ with tf.name_scope(self.layernorm_after.name):
653
+ self.layernorm_after.build([None, None, self.embed_dim])
654
+
655
+
656
+ class TFCvtStage(keras.layers.Layer):
657
+ """
658
+ Cvt stage (encoder block). Each stage has 2 parts:
659
+ - (1) A Convolutional Token Embedding layer
660
+ - (2) A Convolutional Transformer Block (layer).
661
+ The classification token is added only in the last stage.
662
+
663
+ Args:
664
+ config ([`CvtConfig`]): Model configuration class.
665
+ stage (`int`): Stage number.
666
+ """
667
+
668
+ def __init__(self, config: CvtConfig, stage: int, **kwargs):
669
+ super().__init__(**kwargs)
670
+ self.config = config
671
+ self.stage = stage
672
+ if self.config.cls_token[self.stage]:
673
+ self.cls_token = self.add_weight(
674
+ shape=(1, 1, self.config.embed_dim[-1]),
675
+ initializer=get_initializer(self.config.initializer_range),
676
+ trainable=True,
677
+ name="cvt.encoder.stages.2.cls_token",
678
+ )
679
+
680
+ self.embedding = TFCvtEmbeddings(
681
+ self.config,
682
+ patch_size=config.patch_sizes[self.stage],
683
+ num_channels=config.num_channels if self.stage == 0 else config.embed_dim[self.stage - 1],
684
+ stride=config.patch_stride[self.stage],
685
+ embed_dim=config.embed_dim[self.stage],
686
+ padding=config.patch_padding[self.stage],
687
+ dropout_rate=config.drop_rate[self.stage],
688
+ name="embedding",
689
+ )
690
+
691
+ drop_path_rates = tf.linspace(0.0, config.drop_path_rate[self.stage], config.depth[stage])
692
+ drop_path_rates = [x.numpy().item() for x in drop_path_rates]
693
+ self.layers = [
694
+ TFCvtLayer(
695
+ config,
696
+ num_heads=config.num_heads[self.stage],
697
+ embed_dim=config.embed_dim[self.stage],
698
+ kernel_size=config.kernel_qkv[self.stage],
699
+ stride_q=config.stride_q[self.stage],
700
+ stride_kv=config.stride_kv[self.stage],
701
+ padding_q=config.padding_q[self.stage],
702
+ padding_kv=config.padding_kv[self.stage],
703
+ qkv_projection_method=config.qkv_projection_method[self.stage],
704
+ qkv_bias=config.qkv_bias[self.stage],
705
+ attention_drop_rate=config.attention_drop_rate[self.stage],
706
+ drop_rate=config.drop_rate[self.stage],
707
+ mlp_ratio=config.mlp_ratio[self.stage],
708
+ drop_path_rate=drop_path_rates[self.stage],
709
+ with_cls_token=config.cls_token[self.stage],
710
+ name=f"layers.{j}",
711
+ )
712
+ for j in range(config.depth[self.stage])
713
+ ]
714
+
715
+ def call(self, hidden_state: tf.Tensor, training: bool = False):
716
+ cls_token = None
717
+ hidden_state = self.embedding(hidden_state, training)
718
+
719
+ # "batch_size, height, width, num_channels -> batch_size, (height*width), num_channels"
720
+ batch_size, height, width, num_channels = shape_list(hidden_state)
721
+ hidden_size = height * width
722
+ hidden_state = tf.reshape(hidden_state, shape=(batch_size, hidden_size, num_channels))
723
+
724
+ if self.config.cls_token[self.stage]:
725
+ cls_token = tf.repeat(self.cls_token, repeats=batch_size, axis=0)
726
+ hidden_state = tf.concat((cls_token, hidden_state), axis=1)
727
+
728
+ for layer in self.layers:
729
+ layer_outputs = layer(hidden_state, height, width, training=training)
730
+ hidden_state = layer_outputs
731
+
732
+ if self.config.cls_token[self.stage]:
733
+ cls_token, hidden_state = tf.split(hidden_state, [1, height * width], 1)
734
+
735
+ # "batch_size, (height*width), num_channels -> batch_size, height, width, num_channels"
736
+ hidden_state = tf.reshape(hidden_state, shape=(batch_size, height, width, num_channels))
737
+ return hidden_state, cls_token
738
+
739
+ def build(self, input_shape=None):
740
+ if self.built:
741
+ return
742
+ self.built = True
743
+ if getattr(self, "embedding", None) is not None:
744
+ with tf.name_scope(self.embedding.name):
745
+ self.embedding.build(None)
746
+ if getattr(self, "layers", None) is not None:
747
+ for layer in self.layers:
748
+ with tf.name_scope(layer.name):
749
+ layer.build(None)
750
+
751
+
752
+ class TFCvtEncoder(keras.layers.Layer):
753
+ """
754
+ Convolutional Vision Transformer encoder. CVT has 3 stages of encoder blocks with their respective number of layers
755
+ (depth) being 1, 2 and 10.
756
+
757
+ Args:
758
+ config ([`CvtConfig`]): Model configuration class.
759
+ """
760
+
761
+ config_class = CvtConfig
762
+
763
+ def __init__(self, config: CvtConfig, **kwargs):
764
+ super().__init__(**kwargs)
765
+ self.config = config
766
+ self.stages = [
767
+ TFCvtStage(config, stage_idx, name=f"stages.{stage_idx}") for stage_idx in range(len(config.depth))
768
+ ]
769
+
770
+ def call(
771
+ self,
772
+ pixel_values: TFModelInputType,
773
+ output_hidden_states: Optional[bool] = False,
774
+ return_dict: Optional[bool] = True,
775
+ training: Optional[bool] = False,
776
+ ) -> Union[TFBaseModelOutputWithCLSToken, Tuple[tf.Tensor]]:
777
+ all_hidden_states = () if output_hidden_states else None
778
+ hidden_state = pixel_values
779
+ # When running on CPU, `keras.layers.Conv2D` doesn't support (batch_size, num_channels, height, width)
780
+ # as input format. So change the input format to (batch_size, height, width, num_channels).
781
+ hidden_state = tf.transpose(hidden_state, perm=(0, 2, 3, 1))
782
+
783
+ cls_token = None
784
+ for stage_module in self.stages:
785
+ hidden_state, cls_token = stage_module(hidden_state, training=training)
786
+ if output_hidden_states:
787
+ all_hidden_states = all_hidden_states + (hidden_state,)
788
+
789
+ # Change back to (batch_size, num_channels, height, width) format to have uniformity in the modules
790
+ hidden_state = tf.transpose(hidden_state, perm=(0, 3, 1, 2))
791
+ if output_hidden_states:
792
+ all_hidden_states = tuple([tf.transpose(hs, perm=(0, 3, 1, 2)) for hs in all_hidden_states])
793
+
794
+ if not return_dict:
795
+ return tuple(v for v in [hidden_state, cls_token, all_hidden_states] if v is not None)
796
+
797
+ return TFBaseModelOutputWithCLSToken(
798
+ last_hidden_state=hidden_state,
799
+ cls_token_value=cls_token,
800
+ hidden_states=all_hidden_states,
801
+ )
802
+
803
+ def build(self, input_shape=None):
804
+ if self.built:
805
+ return
806
+ self.built = True
807
+ if getattr(self, "stages", None) is not None:
808
+ for layer in self.stages:
809
+ with tf.name_scope(layer.name):
810
+ layer.build(None)
811
+
812
+
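As the comments in `TFCvtEncoder.call` above note, the TF port transposes channels-first pixel values to channels-last for `keras.layers.Conv2D` and transposes back before returning. A tiny sketch of that convention (illustrative only, not library code):

```python
import tensorflow as tf

pixel_values = tf.random.normal((1, 3, 224, 224))                # (batch, channels, height, width), PyTorch-style
channels_last = tf.transpose(pixel_values, perm=(0, 2, 3, 1))    # (1, 224, 224, 3) for Conv2D
# ... the stages run on channels-last tensors ...
channels_first = tf.transpose(channels_last, perm=(0, 3, 1, 2))  # back to (1, 3, 224, 224) for the public output
```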
813
+ @keras_serializable
814
+ class TFCvtMainLayer(keras.layers.Layer):
815
+ """Construct the Cvt model."""
816
+
817
+ config_class = CvtConfig
818
+
819
+ def __init__(self, config: CvtConfig, **kwargs):
820
+ super().__init__(**kwargs)
821
+ self.config = config
822
+ self.encoder = TFCvtEncoder(config, name="encoder")
823
+
824
+ @unpack_inputs
825
+ def call(
826
+ self,
827
+ pixel_values: TFModelInputType | None = None,
828
+ output_hidden_states: Optional[bool] = None,
829
+ return_dict: Optional[bool] = None,
830
+ training: Optional[bool] = False,
831
+ ) -> Union[TFBaseModelOutputWithCLSToken, Tuple[tf.Tensor]]:
832
+ if pixel_values is None:
833
+ raise ValueError("You have to specify pixel_values")
834
+
835
+ encoder_outputs = self.encoder(
836
+ pixel_values,
837
+ output_hidden_states=output_hidden_states,
838
+ return_dict=return_dict,
839
+ training=training,
840
+ )
841
+
842
+ sequence_output = encoder_outputs[0]
843
+
844
+ if not return_dict:
845
+ return (sequence_output,) + encoder_outputs[1:]
846
+
847
+ return TFBaseModelOutputWithCLSToken(
848
+ last_hidden_state=sequence_output,
849
+ cls_token_value=encoder_outputs.cls_token_value,
850
+ hidden_states=encoder_outputs.hidden_states,
851
+ )
852
+
853
+ def build(self, input_shape=None):
854
+ if self.built:
855
+ return
856
+ self.built = True
857
+ if getattr(self, "encoder", None) is not None:
858
+ with tf.name_scope(self.encoder.name):
859
+ self.encoder.build(None)
860
+
861
+
862
+ class TFCvtPreTrainedModel(TFPreTrainedModel):
863
+ """
864
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
865
+ models.
866
+ """
867
+
868
+ config_class = CvtConfig
869
+ base_model_prefix = "cvt"
870
+ main_input_name = "pixel_values"
871
+
872
+
873
+ TFCVT_START_DOCSTRING = r"""
874
+
875
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
876
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
877
+ etc.)
878
+
879
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
880
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
881
+ behavior.
882
+
883
+ <Tip>
884
+
885
+ TF 2.0 models accept two formats as inputs:
886
+
887
+ - having all inputs as keyword arguments (like PyTorch models), or
888
+ - having all inputs as a list, tuple or dict in the first positional argument.
889
+
890
+ This second option is useful when using the [`keras.Model.fit`] method, which currently requires having all the
891
+ tensors in the first argument of the model call function: `model(inputs)`.
892
+
893
+ </Tip>
894
+
895
+ Args:
896
+ config ([`CvtConfig`]): Model configuration class with all the parameters of the model.
897
+ Initializing with a config file does not load the weights associated with the model, only the
898
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
899
+ """
900
+
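The Tip above mentions the two accepted input formats. A short, hypothetical usage sketch, mirroring the checkpoint used in the docstring examples below (network access and the `microsoft/cvt-13` weights are assumed):

```python
from transformers import AutoImageProcessor, TFCvtModel
from PIL import Image
import requests

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
image_processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
model = TFCvtModel.from_pretrained("microsoft/cvt-13")
inputs = image_processor(images=image, return_tensors="tf")

outputs = model(pixel_values=inputs["pixel_values"])       # keyword arguments
outputs = model({"pixel_values": inputs["pixel_values"]})  # dict in the first positional argument
```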
901
+ TFCVT_INPUTS_DOCSTRING = r"""
902
+ Args:
903
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `(batch_size, num_channels, height, width)`):
904
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CvtImageProcessor.__call__`]
905
+ for details.
906
+
907
+ output_hidden_states (`bool`, *optional*):
908
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
909
+ more detail. This argument can only be used in eager mode; in graph mode, the value in the config will be
910
+ used instead.
911
+ return_dict (`bool`, *optional*):
912
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
913
+ eager mode; in graph mode, the value will always be set to `True`.
914
+ training (`bool`, *optional*, defaults to `False`):
915
+ Whether or not to use the model in training mode (some modules like dropout modules have different
916
+ behaviors between training and evaluation).
917
+ """
918
+
919
+
920
+ @add_start_docstrings(
921
+ "The bare Cvt Model transformer outputting raw hidden-states without any specific head on top.",
922
+ TFCVT_START_DOCSTRING,
923
+ )
924
+ class TFCvtModel(TFCvtPreTrainedModel):
925
+ def __init__(self, config: CvtConfig, *inputs, **kwargs):
926
+ super().__init__(config, *inputs, **kwargs)
927
+
928
+ self.cvt = TFCvtMainLayer(config, name="cvt")
929
+
930
+ @unpack_inputs
931
+ @add_start_docstrings_to_model_forward(TFCVT_INPUTS_DOCSTRING)
932
+ @replace_return_docstrings(output_type=TFBaseModelOutputWithCLSToken, config_class=_CONFIG_FOR_DOC)
933
+ def call(
934
+ self,
935
+ pixel_values: tf.Tensor | None = None,
936
+ output_hidden_states: Optional[bool] = None,
937
+ return_dict: Optional[bool] = None,
938
+ training: Optional[bool] = False,
939
+ ) -> Union[TFBaseModelOutputWithCLSToken, Tuple[tf.Tensor]]:
940
+ r"""
941
+ Returns:
942
+
943
+ Examples:
944
+
945
+ ```python
946
+ >>> from transformers import AutoImageProcessor, TFCvtModel
947
+ >>> from PIL import Image
948
+ >>> import requests
949
+
950
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
951
+ >>> image = Image.open(requests.get(url, stream=True).raw)
952
+
953
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
954
+ >>> model = TFCvtModel.from_pretrained("microsoft/cvt-13")
955
+
956
+ >>> inputs = image_processor(images=image, return_tensors="tf")
957
+ >>> outputs = model(**inputs)
958
+ >>> last_hidden_states = outputs.last_hidden_state
959
+ ```"""
960
+
961
+ if pixel_values is None:
962
+ raise ValueError("You have to specify pixel_values")
963
+
964
+ outputs = self.cvt(
965
+ pixel_values=pixel_values,
966
+ output_hidden_states=output_hidden_states,
967
+ return_dict=return_dict,
968
+ training=training,
969
+ )
970
+
971
+ if not return_dict:
972
+ return (outputs[0],) + outputs[1:]
973
+
974
+ return TFBaseModelOutputWithCLSToken(
975
+ last_hidden_state=outputs.last_hidden_state,
976
+ cls_token_value=outputs.cls_token_value,
977
+ hidden_states=outputs.hidden_states,
978
+ )
979
+
980
+ def build(self, input_shape=None):
981
+ if self.built:
982
+ return
983
+ self.built = True
984
+ if getattr(self, "cvt", None) is not None:
985
+ with tf.name_scope(self.cvt.name):
986
+ self.cvt.build(None)
987
+
988
+
989
+ @add_start_docstrings(
990
+ """
991
+ Cvt Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
992
+ the [CLS] token) e.g. for ImageNet.
993
+ """,
994
+ TFCVT_START_DOCSTRING,
995
+ )
996
+ class TFCvtForImageClassification(TFCvtPreTrainedModel, TFSequenceClassificationLoss):
997
+ def __init__(self, config: CvtConfig, *inputs, **kwargs):
998
+ super().__init__(config, *inputs, **kwargs)
999
+
1000
+ self.num_labels = config.num_labels
1001
+ self.cvt = TFCvtMainLayer(config, name="cvt")
1002
+ # Using same default epsilon as in the original implementation.
1003
+ self.layernorm = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm")
1004
+
1005
+ # Classifier head
1006
+ self.classifier = keras.layers.Dense(
1007
+ units=config.num_labels,
1008
+ kernel_initializer=get_initializer(config.initializer_range),
1009
+ use_bias=True,
1010
+ bias_initializer="zeros",
1011
+ name="classifier",
1012
+ )
1013
+ self.config = config
1014
+
1015
+ @unpack_inputs
1016
+ @add_start_docstrings_to_model_forward(TFCVT_INPUTS_DOCSTRING)
1017
+ @replace_return_docstrings(output_type=TFImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC)
1018
+ def call(
1019
+ self,
1020
+ pixel_values: tf.Tensor | None = None,
1021
+ labels: tf.Tensor | None = None,
1022
+ output_hidden_states: Optional[bool] = None,
1023
+ return_dict: Optional[bool] = None,
1024
+ training: Optional[bool] = False,
1025
+ ) -> Union[TFImageClassifierOutputWithNoAttention, Tuple[tf.Tensor]]:
1026
+ r"""
1027
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1028
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
1029
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1030
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1031
+
1032
+ Returns:
1033
+
1034
+ Examples:
1035
+
1036
+ ```python
1037
+ >>> from transformers import AutoImageProcessor, TFCvtForImageClassification
1038
+ >>> import tensorflow as tf
1039
+ >>> from PIL import Image
1040
+ >>> import requests
1041
+
1042
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1043
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1044
+
1045
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
1046
+ >>> model = TFCvtForImageClassification.from_pretrained("microsoft/cvt-13")
1047
+
1048
+ >>> inputs = image_processor(images=image, return_tensors="tf")
1049
+ >>> outputs = model(**inputs)
1050
+ >>> logits = outputs.logits
1051
+ >>> # model predicts one of the 1000 ImageNet classes
1052
+ >>> predicted_class_idx = tf.math.argmax(logits, axis=-1)[0]
1053
+ >>> print("Predicted class:", model.config.id2label[int(predicted_class_idx)])
1054
+ ```"""
1055
+
1056
+ outputs = self.cvt(
1057
+ pixel_values,
1058
+ output_hidden_states=output_hidden_states,
1059
+ return_dict=return_dict,
1060
+ training=training,
1061
+ )
1062
+
1063
+ sequence_output = outputs[0]
1064
+ cls_token = outputs[1]
1065
+ if self.config.cls_token[-1]:
1066
+ sequence_output = self.layernorm(cls_token)
1067
+ else:
1068
+ # rearrange "batch_size, num_channels, height, width -> batch_size, (height*width), num_channels"
1069
+ batch_size, num_channels, height, width = shape_list(sequence_output)
1070
+ sequence_output = tf.reshape(sequence_output, shape=(batch_size, num_channels, height * width))
1071
+ sequence_output = tf.transpose(sequence_output, perm=(0, 2, 1))
1072
+ sequence_output = self.layernorm(sequence_output)
1073
+
1074
+ sequence_output_mean = tf.reduce_mean(sequence_output, axis=1)
1075
+ logits = self.classifier(sequence_output_mean)
1076
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1077
+
1078
+ if not return_dict:
1079
+ output = (logits,) + outputs[2:]
1080
+ return ((loss,) + output) if loss is not None else output
1081
+
1082
+ return TFImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
1083
+
1084
+ def build(self, input_shape=None):
1085
+ if self.built:
1086
+ return
1087
+ self.built = True
1088
+ if getattr(self, "cvt", None) is not None:
1089
+ with tf.name_scope(self.cvt.name):
1090
+ self.cvt.build(None)
1091
+ if getattr(self, "layernorm", None) is not None:
1092
+ with tf.name_scope(self.layernorm.name):
1093
+ self.layernorm.build([None, None, self.config.embed_dim[-1]])
1094
+ if getattr(self, "classifier", None) is not None:
1095
+ if hasattr(self.classifier, "name"):
1096
+ with tf.name_scope(self.classifier.name):
1097
+ self.classifier.build([None, None, self.config.embed_dim[-1]])
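To make the classification head above easier to follow, here is a standalone sketch of its non-cls-token path: the channels-first feature map is flattened into tokens, layer-normed, and mean-pooled before the classifier. Shapes are illustrative only; this is not library code.

```python
import tensorflow as tf

batch_size, num_channels, height, width = 2, 384, 14, 14
sequence_output = tf.random.normal((batch_size, num_channels, height, width))

# batch_size, num_channels, height, width -> batch_size, (height*width), num_channels
tokens = tf.reshape(sequence_output, (batch_size, num_channels, height * width))
tokens = tf.transpose(tokens, perm=(0, 2, 1))
tokens = tf.keras.layers.LayerNormalization(epsilon=1e-5)(tokens)
pooled = tf.reduce_mean(tokens, axis=1)  # (batch_size, num_channels), fed to the Dense classifier
print(pooled.shape)                      # (2, 384)
```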
llmeval-env/lib/python3.10/site-packages/transformers/models/phi/__init__.py ADDED
@@ -0,0 +1,69 @@
1
+ # Copyright 2023 Microsoft and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from typing import TYPE_CHECKING
17
+
18
+ from ...utils import (
19
+ OptionalDependencyNotAvailable,
20
+ _LazyModule,
21
+ is_sentencepiece_available,
22
+ is_tokenizers_available,
23
+ is_torch_available,
24
+ )
25
+
26
+
27
+ _import_structure = {
28
+ "configuration_phi": ["PHI_PRETRAINED_CONFIG_ARCHIVE_MAP", "PhiConfig"],
29
+ }
30
+
31
+ try:
32
+ if not is_torch_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["modeling_phi"] = [
38
+ "PHI_PRETRAINED_MODEL_ARCHIVE_LIST",
39
+ "PhiPreTrainedModel",
40
+ "PhiModel",
41
+ "PhiForCausalLM",
42
+ "PhiForSequenceClassification",
43
+ "PhiForTokenClassification",
44
+ ]
45
+
46
+
47
+ if TYPE_CHECKING:
48
+ from .configuration_phi import PHI_PRETRAINED_CONFIG_ARCHIVE_MAP, PhiConfig
49
+
50
+ try:
51
+ if not is_torch_available():
52
+ raise OptionalDependencyNotAvailable()
53
+ except OptionalDependencyNotAvailable:
54
+ pass
55
+ else:
56
+ from .modeling_phi import (
57
+ PHI_PRETRAINED_MODEL_ARCHIVE_LIST,
58
+ PhiForCausalLM,
59
+ PhiForSequenceClassification,
60
+ PhiForTokenClassification,
61
+ PhiModel,
62
+ PhiPreTrainedModel,
63
+ )
64
+
65
+
66
+ else:
67
+ import sys
68
+
69
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
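A brief, hedged sketch of how this lazy-import structure is consumed (assuming `transformers` and `torch` are installed): the configuration import is cheap, and the torch-backed classes only load when accessed. The tiny config values below are arbitrary, chosen only to keep the example fast.

```python
from transformers import PhiConfig          # only configuration code is imported here

config = PhiConfig(
    vocab_size=1000,
    hidden_size=64,
    intermediate_size=256,
    num_hidden_layers=2,
    num_attention_heads=4,
)

from transformers import PhiForCausalLM     # this access triggers loading the torch-dependent modeling code

model = PhiForCausalLM(config)              # tiny randomly initialised model, for illustration only
```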
llmeval-env/lib/python3.10/site-packages/transformers/models/phi/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.07 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/phi/__pycache__/configuration_phi.cpython-310.pyc ADDED
Binary file (7.78 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/phi/__pycache__/convert_phi_weights_to_hf.cpython-310.pyc ADDED
Binary file (4.97 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/phi/__pycache__/modeling_phi.cpython-310.pyc ADDED
Binary file (42.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/phi/configuration_phi.py ADDED
@@ -0,0 +1,191 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Microsoft and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """ Phi model configuration"""
17
+
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ from ..deprecated._archive_maps import PHI_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
27
+
28
+
29
+ class PhiConfig(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of a [`PhiModel`]. It is used to instantiate a Phi
32
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
33
+ defaults will yield a similar configuration to that of the Phi
34
+ [microsoft/phi-1](https://huggingface.co/microsoft/phi-1).
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+ Args:
40
+ vocab_size (`int`, *optional*, defaults to 51200):
41
+ Vocabulary size of the Phi model. Defines the number of different tokens that can be represented by the
42
+ `inputs_ids` passed when calling [`PhiModel`].
43
+ hidden_size (`int`, *optional*, defaults to 2048):
44
+ Dimension of the hidden representations.
45
+ intermediate_size (`int`, *optional*, defaults to 8192):
46
+ Dimension of the MLP representations.
47
+ num_hidden_layers (`int`, *optional*, defaults to 24):
48
+ Number of hidden layers in the Transformer decoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 32):
50
+ Number of attention heads for each attention layer in the Transformer decoder.
51
+ num_key_value_heads (`int`, *optional*):
52
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
53
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
54
+ `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
55
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
56
+ by meanpooling all the original heads within that group. For more details checkout [this
57
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
58
+ `num_attention_heads`.
59
+ resid_pdrop (`float`, *optional*, defaults to 0.0):
60
+ Dropout probability for mlp outputs.
61
+ embd_pdrop (`int`, *optional*, defaults to 0.0):
62
+ The dropout ratio for the embeddings.
63
+ attention_dropout (`float`, *optional*, defaults to 0.0):
64
+ The dropout ratio after computing the attention scores.
65
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`):
66
+ The non-linear activation function (function or string) in the decoder.
67
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
68
+ The maximum sequence length that this model might ever be used with. Phi-1 and Phi-1.5 supports up to 2048
69
+ tokens.
70
+ initializer_range (`float`, *optional*, defaults to 0.02):
71
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
72
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
73
+ The epsilon used by the layer normalization layers.
74
+ use_cache (`bool`, *optional*, defaults to `True`):
75
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
76
+ relevant if `config.is_decoder=True`.
77
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
78
+ Whether to tie weight embeddings or not.
79
+ rope_theta (`float`, *optional*, defaults to 10000.0):
80
+ The base period of the RoPE embeddings.
81
+ rope_scaling (`Dict`, *optional*):
82
+ Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
83
+ strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format
84
+ is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
85
+ `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
86
+ these scaling strategies behave:
87
+ https://www.reddit.com/r/LocalPersimmon/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This
88
+ is an experimental feature, subject to breaking API changes in future versions.
89
+ partial_rotary_factor (`float`, *optional*, defaults to 0.5):
90
+ Percentage of the query and keys which will have rotary embedding.
91
+ qk_layernorm (`bool`, *optional*, defaults to `False`):
92
+ Whether or not to normalize the Queries and Keys after projecting the hidden states.
93
+ bos_token_id (`int`, *optional*, defaults to 1):
94
+ Denotes beginning of sequences token id.
95
+ eos_token_id (`int`, *optional*, defaults to 2):
96
+ Denotes end of sequences token id.
97
+
98
+ Example:
99
+
100
+ ```python
101
+ >>> from transformers import PhiModel, PhiConfig
102
+
103
+ >>> # Initializing a Phi-1 style configuration
104
+ >>> configuration = PhiConfig.from_pretrained("microsoft/phi-1")
105
+
106
+ >>> # Initializing a model from the configuration
107
+ >>> model = PhiModel(configuration)
108
+
109
+ >>> # Accessing the model configuration
110
+ >>> configuration = model.config
111
+ ```"""
112
+
113
+ model_type = "phi"
114
+ keys_to_ignore_at_inference = ["past_key_values"]
115
+
116
+ def __init__(
117
+ self,
118
+ vocab_size=51200,
119
+ hidden_size=2048,
120
+ intermediate_size=8192,
121
+ num_hidden_layers=24,
122
+ num_attention_heads=32,
123
+ num_key_value_heads=None,
124
+ resid_pdrop=0.0,
125
+ embd_pdrop=0.0,
126
+ attention_dropout=0.0,
127
+ hidden_act="gelu_new",
128
+ max_position_embeddings=2048,
129
+ initializer_range=0.02,
130
+ layer_norm_eps=1e-5,
131
+ use_cache=True,
132
+ tie_word_embeddings=False,
133
+ rope_theta=10000.0,
134
+ rope_scaling=None,
135
+ partial_rotary_factor=0.5,
136
+ qk_layernorm=False,
137
+ bos_token_id=1,
138
+ eos_token_id=2,
139
+ **kwargs,
140
+ ):
141
+ self.vocab_size = vocab_size
142
+ self.hidden_size = hidden_size
143
+ self.intermediate_size = intermediate_size
144
+ self.num_hidden_layers = num_hidden_layers
145
+ self.num_attention_heads = num_attention_heads
146
+
147
+ if num_key_value_heads is None:
148
+ num_key_value_heads = num_attention_heads
149
+
150
+ self.num_key_value_heads = num_key_value_heads
151
+ self.resid_pdrop = resid_pdrop
152
+ self.embd_pdrop = embd_pdrop
153
+ self.attention_dropout = attention_dropout
154
+ self.hidden_act = hidden_act
155
+ self.max_position_embeddings = max_position_embeddings
156
+ self.initializer_range = initializer_range
157
+ self.layer_norm_eps = layer_norm_eps
158
+ self.use_cache = use_cache
159
+ self.rope_theta = rope_theta
160
+ self.rope_scaling = rope_scaling
161
+ self.partial_rotary_factor = partial_rotary_factor
162
+ self.qk_layernorm = qk_layernorm
163
+ self._rope_scaling_validation()
164
+
165
+ super().__init__(
166
+ bos_token_id=bos_token_id,
167
+ eos_token_id=eos_token_id,
168
+ tie_word_embeddings=tie_word_embeddings,
169
+ **kwargs,
170
+ )
171
+
172
+ # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
173
+ def _rope_scaling_validation(self):
174
+ """
175
+ Validate the `rope_scaling` configuration.
176
+ """
177
+ if self.rope_scaling is None:
178
+ return
179
+
180
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
181
+ raise ValueError(
182
+ "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
183
+ )
184
+ rope_scaling_type = self.rope_scaling.get("type", None)
185
+ rope_scaling_factor = self.rope_scaling.get("factor", None)
186
+ if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
187
+ raise ValueError(
188
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
189
+ )
190
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
191
+ raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
llmeval-env/lib/python3.10/site-packages/transformers/models/phi/convert_phi_weights_to_hf.py ADDED
@@ -0,0 +1,207 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Microsoft and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ Weights conversion script for Phi
18
+
19
+ This script downloads both Phi-1 and Phi-1.5 checkpoints to "checkpoint_path" and then converts the weights to
20
+ HugfgingFace model's format and saves them in "pytorch_dump_folder_path".
21
+
22
+ Example : $python ./convert_phi_weights_to_hf.py --model_name "microsoft/phi-2" --pytorch_dump_folder ./dump_folder/ --checkpoint_path ./ckpt_path/
23
+ """
24
+
25
+ import argparse
26
+ import gc
27
+ import os
28
+
29
+ import safetensors
30
+ import torch
31
+ from huggingface_hub import hf_hub_download
32
+
33
+ from transformers import PhiConfig, PhiForCausalLM
34
+
35
+
36
+ _MODELS = {
37
+ "microsoft/phi-1": ["https://huggingface.co/microsoft/phi-1/blob/main/pytorch_model.bin"],
38
+ "microsoft/phi-1_5": ["https://huggingface.co/microsoft/phi-1_5/blob/main/pytorch_model.bin"],
39
+ "microsoft/phi-2": [
40
+ "https://huggingface.co/microsoft/phi-2/blob/main/model-00001-of-00002.safetensors",
41
+ "https://huggingface.co/microsoft/phi-2/blob/main/model-00002-of-00002.safetensors",
42
+ ],
43
+ }
44
+
45
+ PHI_MAPPING = {
46
+ "transformer.embd.wte.weight": "model.embed_tokens.weight",
47
+ "lm_head.linear": "lm_head",
48
+ "lm_head.ln": "model.final_layernorm",
49
+ "layers": "model.layers",
50
+ "transformer": "model",
51
+ ".h.": ".layers.",
52
+ "ln": "input_layernorm",
53
+ "mixer": "self_attn",
54
+ "Wqkv": "query_key_value",
55
+ "out_proj": "dense",
56
+ }
57
+
58
+
59
+ def convert_weights(original_weights, mapping, config):
60
+ converted_weights = {}
61
+ original_weights_keys = sorted(original_weights.keys())
62
+
63
+ for original_weights_key in original_weights_keys:
64
+ new_key = original_weights_key
65
+
66
+ if "rotary_emb" in new_key:
67
+ continue
68
+
69
+ if "Wqkv" in new_key:
70
+ if "weight" in new_key:
71
+ weight = original_weights[new_key]
72
+ weights_shape = weight.shape
73
+ weight = (
74
+ weight.view(3, config.num_attention_heads, -1, config.hidden_size)
75
+ .transpose(0, 1)
76
+ .reshape(*weights_shape)
77
+ )
78
+ original_weights[new_key] = weight
79
+ elif "bias" in new_key:
80
+ bias = original_weights[new_key]
81
+ bias_shape = bias.shape
82
+ bias = bias.view(3, config.num_attention_heads, -1).transpose(0, 1).reshape(*bias_shape)
83
+ original_weights[new_key] = bias
84
+
85
+ for k, v in mapping.items():
86
+ if k in new_key:
87
+ new_key = new_key.replace(k, v)
88
+
89
+ converted_weights[new_key] = original_weights.pop(original_weights_key)
90
+
91
+ return converted_weights
92
+
93
+
94
+ def _download(url: str, root: str):
95
+ repo_id = f"{url.split('/')[3]}/{url.split('/')[4]}"
96
+ filename = f"{url.split('/')[-1]}"
97
+ hf_hub_download(
98
+ repo_id=repo_id,
99
+ filename=filename,
100
+ force_filename=root,
101
+ local_dir_use_symlinks=False,
102
+ )
103
+
104
+
105
+ def convert_phi_weights(
106
+ model_name, checkpoint_path, pytorch_dump_folder_path, use_cuda, save_weights_directly, _MODELS
107
+ ):
108
+ _MODELS = _MODELS if model_name not in _MODELS.keys() else {model_name: _MODELS.get(model_name)}
109
+ device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
110
+ for model_name, model_url in _MODELS.items():
111
+ converted_checkpoint = {}
112
+ model_checkpoint = {}
113
+
114
+ # for phi-2 the weights are stored in 2 different safetensors file so we need to iterate over that list and download one at a time
115
+ for model_each_url in model_url:
116
+ model_path = os.path.join(checkpoint_path, model_name + "_" + model_each_url.split("/")[-1])
117
+ if not os.path.exists(model_path):
118
+ print(f"\n{model_name} was not found! Downloading it to {model_path}")
119
+ _download(url=model_each_url, root=model_path)
120
+
121
+ if model_path.endswith("safetensors"):
122
+ loaded_weights = safetensors.torch.load_file(model_path, device=device)
123
+ else:
124
+ loaded_weights = torch.load(model_path, map_location=device)
125
+ model_checkpoint.update(**loaded_weights)
126
+
127
+ model_type = model_name.split("/")[1] # phi-1 or phi-1_5 or phi-2
128
+
129
+ # init the config for phi-1 and phi-1.5
130
+ config = PhiConfig()
131
+ # if we are dealing with phi-2 then update the config
132
+ if model_type == "phi-2":
133
+ config.hidden_size = 2560
134
+ config.intermediate_size = 10240
135
+ config.num_hidden_layers = 32
136
+ config.resid_pdrop = 0.1
137
+ config.partial_rotary_factor = 0.4
138
+ config.num_hidden_layers = 32
139
+ config.torch_dtype = "float16"
140
+
141
+ # Converting the weights
142
+ converted_checkpoint.update(**convert_weights(model_checkpoint, PHI_MAPPING, config))
143
+
144
+ # Save either the whole model or the converted weights
145
+ if save_weights_directly:
146
+ save_weights_path = os.path.join(pytorch_dump_folder_path, model_type + "_pytorch_model.bin")
147
+ torch.save(converted_checkpoint, save_weights_path)
148
+ print(f"Model weights saved at {save_weights_path}!")
149
+
150
+ else:
151
+ model = PhiForCausalLM(config).to(device)
152
+ model.load_state_dict(converted_checkpoint, strict=True)
153
+ save_model_path = os.path.join(pytorch_dump_folder_path, model_type)
154
+ model.save_pretrained(save_model_path)
155
+ print(f"Model saved at {save_model_path}!")
156
+
157
+ # release GPU memory for the 2nd model if cuda was used.
158
+ del config, model
159
+
160
+ # release GPU memory for the 2nd model if cuda was used.
161
+ del model_checkpoint, converted_checkpoint
162
+ if use_cuda:
163
+ torch.cuda.empty_cache()
164
+ gc.collect()
165
+
166
+
167
+ if __name__ == "__main__":
168
+ parser = argparse.ArgumentParser()
169
+ # # Required parameters
170
+ parser.add_argument(
171
+ "--model_name",
172
+ type=str,
173
+ help="Name of the model to convert. (Please enter one of the following: phi-1, phi-1_5, phi-2). If nothing is provided, all models will be converted.",
174
+ default=None,
175
+ )
176
+ parser.add_argument(
177
+ "--checkpoint_path", type=str, help="Path to the folder of downloaded checkpoints. (Please enter full path)"
178
+ )
179
+ parser.add_argument(
180
+ "--pytorch_dump_folder_path",
181
+ default=None,
182
+ type=str,
183
+ help="Path to the output PyTorch model. (Please enter full path)",
184
+ )
185
+ parser.add_argument(
186
+ "--use_cuda",
187
+ default=False,
188
+ type=bool,
189
+ help="Whether to load the weights on GPU during conversion or not, False by default",
190
+ )
191
+ parser.add_argument(
192
+ "--save_weights_directly",
193
+ default=True,
194
+ type=bool,
195
+ help="Whether to save the weights directly after conversion or load the weight to the Phi model and then save "
196
+ "the Phi model along with weights. True by default",
197
+ )
198
+
199
+ args = parser.parse_args()
200
+ convert_phi_weights(
201
+ args.model_name,
202
+ args.checkpoint_path,
203
+ args.pytorch_dump_folder_path,
204
+ args.use_cuda,
205
+ args.save_weights_directly,
206
+ _MODELS,
207
+ )
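Because the substring-based renaming in `convert_weights` above can be hard to follow, here is a standalone sketch (no downloads, no torch) of how one original Phi checkpoint key is rewritten by `PHI_MAPPING`; the example key is hypothetical but follows the original naming scheme:

```python
PHI_MAPPING = {
    "transformer.embd.wte.weight": "model.embed_tokens.weight",
    "lm_head.linear": "lm_head",
    "lm_head.ln": "model.final_layernorm",
    "layers": "model.layers",
    "transformer": "model",
    ".h.": ".layers.",
    "ln": "input_layernorm",
    "mixer": "self_attn",
    "Wqkv": "query_key_value",
    "out_proj": "dense",
}

key = "transformer.h.0.mixer.Wqkv.weight"
for old, new in PHI_MAPPING.items():
    if old in key:
        key = key.replace(old, new)
print(key)  # model.layers.0.self_attn.query_key_value.weight
```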
llmeval-env/lib/python3.10/site-packages/transformers/models/phi/modeling_phi.py ADDED
@@ -0,0 +1,1489 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Microsoft and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """ PyTorch Phi model."""
17
+
18
+
19
+ import math
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.nn.functional as F
24
+ import torch.utils.checkpoint
25
+ from packaging import version
26
+ from torch import nn
27
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
28
+
29
+ from ...activations import ACT2FN
30
+ from ...cache_utils import Cache, DynamicCache
31
+ from ...modeling_attn_mask_utils import (
32
+ _prepare_4d_causal_attention_mask,
33
+ _prepare_4d_causal_attention_mask_for_sdpa,
34
+ )
35
+ from ...modeling_outputs import (
36
+ BaseModelOutputWithPast,
37
+ CausalLMOutputWithPast,
38
+ SequenceClassifierOutputWithPast,
39
+ TokenClassifierOutput,
40
+ )
41
+ from ...modeling_utils import PreTrainedModel
42
+ from ...utils import (
43
+ add_code_sample_docstrings,
44
+ add_start_docstrings,
45
+ add_start_docstrings_to_model_forward,
46
+ get_torch_version,
47
+ is_flash_attn_2_available,
48
+ is_flash_attn_greater_or_equal_2_10,
49
+ logging,
50
+ replace_return_docstrings,
51
+ )
52
+ from .configuration_phi import PhiConfig
53
+
54
+
55
+ if is_flash_attn_2_available():
56
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
57
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
58
+
59
+
60
+ logger = logging.get_logger(__name__)
61
+
62
+ _CHECKPOINT_FOR_DOC = "microsoft/phi-1"
63
+ _CONFIG_FOR_DOC = "PhiConfig"
64
+
65
+
66
+ from ..deprecated._archive_maps import PHI_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
67
+
68
+
69
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
70
+ def _get_unpad_data(attention_mask):
71
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
72
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
73
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
74
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
75
+ return (
76
+ indices,
77
+ cu_seqlens,
78
+ max_seqlen_in_batch,
79
+ )
80
+
81
+
82
+ # Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Phi
83
+ class PhiRotaryEmbedding(nn.Module):
84
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
85
+ super().__init__()
86
+
87
+ self.dim = dim
88
+ self.max_position_embeddings = max_position_embeddings
89
+ self.base = base
90
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
91
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
92
+
93
+ # Build here to make `torch.jit.trace` work.
94
+ self._set_cos_sin_cache(
95
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
96
+ )
97
+
98
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
99
+ self.max_seq_len_cached = seq_len
100
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
101
+
102
+ freqs = torch.outer(t, self.inv_freq)
103
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
104
+ emb = torch.cat((freqs, freqs), dim=-1)
105
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
106
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
107
+
108
+ def forward(self, x, seq_len=None):
109
+ # x: [bs, num_attention_heads, seq_len, head_size]
110
+ if seq_len > self.max_seq_len_cached:
111
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
112
+
113
+ return (
114
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
115
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
116
+ )
117
+
118
+
119
+ # Copied from transformers.models.falcon.modeling_falcon.FalconLinearScalingRotaryEmbedding with Falcon->Phi
120
+ class PhiLinearScalingRotaryEmbedding(PhiRotaryEmbedding):
121
+ """PhiRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
122
+
123
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
124
+ self.scaling_factor = scaling_factor
125
+ super().__init__(dim, max_position_embeddings, base, device)
126
+
127
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
128
+ self.max_seq_len_cached = seq_len
129
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
130
+ t = t / self.scaling_factor
131
+
132
+ freqs = torch.outer(t, self.inv_freq)
133
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
134
+ emb = torch.cat((freqs, freqs), dim=-1)
135
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
136
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
137
+
138
+
139
+ # Copied from transformers.models.falcon.modeling_falcon.FalconDynamicNTKScalingRotaryEmbedding with Falcon->Phi
140
+ class PhiDynamicNTKScalingRotaryEmbedding(PhiRotaryEmbedding):
141
+ """PhiRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
142
+
143
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
144
+ self.scaling_factor = scaling_factor
145
+ super().__init__(dim, max_position_embeddings, base, device)
146
+
147
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
148
+ self.max_seq_len_cached = seq_len
149
+
150
+ if seq_len > self.max_position_embeddings:
151
+ base = self.base * (
152
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
153
+ ) ** (self.dim / (self.dim - 2))
154
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
155
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
156
+
157
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
158
+
159
+ freqs = torch.outer(t, self.inv_freq)
160
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
161
+ emb = torch.cat((freqs, freqs), dim=-1)
162
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
163
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
164
+
165
+
166
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
167
+ def rotate_half(x):
168
+ """Rotates half the hidden dims of the input."""
169
+ x1 = x[..., : x.shape[-1] // 2]
170
+ x2 = x[..., x.shape[-1] // 2 :]
171
+ return torch.cat((-x2, x1), dim=-1)
172
+
173
+
174
+ # Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
175
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
176
+ """Applies Rotary Position Embedding to the query and key tensors.
177
+
178
+ Args:
179
+ q (`torch.Tensor`): The query tensor.
180
+ k (`torch.Tensor`): The key tensor.
181
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
182
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
183
+ position_ids (`torch.Tensor`):
184
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
185
+ used to pass offsetted position ids when working with a KV-cache.
186
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
187
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
188
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
189
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
190
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
191
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
192
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
193
+ Returns:
194
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
195
+ """
196
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
197
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
198
+ q_embed = (q * cos) + (rotate_half(q) * sin)
199
+ k_embed = (k * cos) + (rotate_half(k) * sin)
200
+ return q_embed, k_embed
201
+
202
+
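A tiny sketch exercising the rotary helpers defined above (`rotate_half` and `apply_rotary_pos_emb` are assumed to be in scope from this module). The cos/sin caches are built the same way `PhiRotaryEmbedding._set_cos_sin_cache` does, but with toy dimensions:

```python
import torch

bsz, num_heads, seq_len, head_dim = 1, 2, 4, 8
q = torch.randn(bsz, num_heads, seq_len, head_dim)
k = torch.randn(bsz, num_heads, seq_len, head_dim)

inv_freq = 1.0 / (10000 ** (torch.arange(0, head_dim, 2).float() / head_dim))
freqs = torch.outer(torch.arange(seq_len).float(), inv_freq)
emb = torch.cat((freqs, freqs), dim=-1)
cos, sin = emb.cos(), emb.sin()                    # [seq_len, head_dim]

position_ids = torch.arange(seq_len).unsqueeze(0)  # [1, seq_len]
q_embed, k_embed = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
print(q_embed.shape, k_embed.shape)                # torch.Size([1, 2, 4, 8]) twice
```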
203
+ # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Phi
204
+ class PhiMLP(nn.Module):
205
+ def __init__(self, config):
206
+ super().__init__()
207
+ self.config = config
208
+ self.activation_fn = ACT2FN[config.hidden_act]
209
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
210
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
211
+
212
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
213
+ hidden_states = self.fc1(hidden_states)
214
+ hidden_states = self.activation_fn(hidden_states)
215
+ hidden_states = self.fc2(hidden_states)
216
+ return hidden_states
217
+
218
+
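A minimal sketch exercising `PhiMLP` above with a deliberately tiny, hypothetical configuration (assuming `PhiConfig` is importable and this module is in scope):

```python
import torch
from transformers import PhiConfig

config = PhiConfig(hidden_size=32, intermediate_size=128, num_attention_heads=4)
mlp = PhiMLP(config)

hidden_states = torch.randn(2, 6, config.hidden_size)  # (batch, seq_len, hidden_size)
print(mlp(hidden_states).shape)                        # torch.Size([2, 6, 32])
```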
219
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv with llama->phi
220
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
221
+ """
222
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
223
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
224
+ """
225
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
226
+ if n_rep == 1:
227
+ return hidden_states
228
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
229
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
230
+
231
+
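A quick shape check for `repeat_kv` above: with 2 key/value heads and 8 query heads (so `n_rep = 4`), the grouped heads are expanded so they line up with every query head. Toy dimensions, for illustration only:

```python
import torch

batch, num_key_value_heads, seq_len, head_dim = 1, 2, 5, 16
n_rep = 4                                 # num_attention_heads // num_key_value_heads
kv = torch.randn(batch, num_key_value_heads, seq_len, head_dim)
print(repeat_kv(kv, n_rep).shape)         # torch.Size([1, 8, 5, 16])
```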
232
+ class PhiAttention(nn.Module):
233
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
234
+
235
+ def __init__(self, config: PhiConfig, layer_idx: Optional[int] = None):
236
+ super().__init__()
237
+ self.config = config
238
+ self.layer_idx = layer_idx
239
+ if layer_idx is None:
240
+ logger.warning_once(
241
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
242
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
243
+ "when creating this class."
244
+ )
245
+
246
+ self.attention_dropout = config.attention_dropout
247
+ self.hidden_size = config.hidden_size
248
+ self.num_heads = config.num_attention_heads
249
+ self.head_dim = self.hidden_size // self.num_heads
250
+ self.num_key_value_heads = config.num_key_value_heads
251
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
252
+ self.max_position_embeddings = config.max_position_embeddings
253
+ self.rope_theta = config.rope_theta
254
+ self.partial_rotary_factor = config.partial_rotary_factor
255
+ self.is_causal = True
256
+
257
+ if (self.head_dim * self.num_heads) != self.hidden_size:
258
+ raise ValueError(
259
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
260
+ f" and `num_heads`: {self.num_heads})."
261
+ )
262
+
263
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
264
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
265
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
266
+ self.dense = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=True)
267
+
268
+ self.qk_layernorm = config.qk_layernorm
269
+ if self.qk_layernorm:
270
+ self.q_layernorm = nn.LayerNorm(
271
+ config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True
272
+ )
273
+ self.k_layernorm = nn.LayerNorm(
274
+ config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True
275
+ )
276
+
277
+ self._init_rope()
278
+
279
+ def _init_rope(self):
280
+ if self.config.rope_scaling is None:
281
+ self.rotary_emb = PhiRotaryEmbedding(
282
+ int(self.partial_rotary_factor * self.head_dim),
283
+ max_position_embeddings=self.max_position_embeddings,
284
+ base=self.rope_theta,
285
+ )
286
+ else:
287
+ scaling_type = self.config.rope_scaling["type"]
288
+ scaling_factor = self.config.rope_scaling["factor"]
289
+ if scaling_type == "linear":
290
+ self.rotary_emb = PhiLinearScalingRotaryEmbedding(
291
+ int(self.partial_rotary_factor * self.head_dim),
292
+ max_position_embeddings=self.max_position_embeddings,
293
+ scaling_factor=scaling_factor,
294
+ base=self.rope_theta,
295
+ )
296
+ elif scaling_type == "dynamic":
297
+ self.rotary_emb = PhiDynamicNTKScalingRotaryEmbedding(
298
+ int(self.partial_rotary_factor * self.head_dim),
299
+ max_position_embeddings=self.max_position_embeddings,
300
+ scaling_factor=scaling_factor,
301
+ base=self.rope_theta,
302
+ )
303
+ else:
304
+ raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
305
+
306
+ def forward(
307
+ self,
308
+ hidden_states: torch.Tensor,
309
+ attention_mask: Optional[torch.Tensor] = None,
310
+ position_ids: Optional[torch.LongTensor] = None,
311
+ past_key_value: Optional[Cache] = None,
312
+ output_attentions: bool = False,
313
+ use_cache: bool = False,
314
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
315
+ bsz, q_len, _ = hidden_states.size()
316
+
317
+ query_states = self.q_proj(hidden_states)
318
+ key_states = self.k_proj(hidden_states)
319
+ value_states = self.v_proj(hidden_states)
320
+
321
+ if self.qk_layernorm:
322
+ query_states = self.q_layernorm(query_states)
323
+ key_states = self.k_layernorm(key_states)
324
+
325
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
326
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
327
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
328
+
329
+ kv_seq_len = key_states.shape[-2]
330
+ if past_key_value is not None:
331
+ if self.layer_idx is None:
332
+ raise ValueError(
333
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
334
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
335
+ "with a layer index."
336
+ )
337
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
338
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
339
+
340
+ # Partial rotary embedding
341
+ query_rot, query_pass = (
342
+ query_states[..., : self.rotary_emb.dim],
343
+ query_states[..., self.rotary_emb.dim :],
344
+ )
345
+ key_rot, key_pass = (
346
+ key_states[..., : self.rotary_emb.dim],
347
+ key_states[..., self.rotary_emb.dim :],
348
+ )
349
+ # [batch_size, seq_length, num_heads, head_dim // config.partial_rotary_factor]
350
+ query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
351
+
352
+ # [batch_size, seq_length, num_heads, head_dim]
353
+ query_states = torch.cat((query_rot, query_pass), dim=-1)
354
+ key_states = torch.cat((key_rot, key_pass), dim=-1)
355
+
356
+ if past_key_value is not None:
357
+ cache_kwargs = {"sin": sin, "cos": cos, "partial_rotation_size": self.rotary_emb.dim}
358
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
359
+
360
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
361
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
362
+
363
+ # Queries and keys upcast to fp32 is required by Phi-2 to avoid overflow
364
+ attn_weights = torch.matmul(
365
+ query_states.to(torch.float32), key_states.to(torch.float32).transpose(2, 3)
366
+ ) / math.sqrt(self.head_dim)
367
+
368
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
369
+ raise ValueError(
370
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
371
+ f" {attn_weights.size()}"
372
+ )
373
+
374
+ if attention_mask is not None:
375
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
376
+ raise ValueError(
377
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
378
+ )
379
+ attn_weights = attn_weights + attention_mask
380
+
381
+ # upcast attention to fp32
382
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(value_states.dtype)
383
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
384
+
385
+ attn_output = torch.matmul(attn_weights, value_states)
386
+
387
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
388
+ raise ValueError(
389
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
390
+ f" {attn_output.size()}"
391
+ )
392
+
393
+ attn_output = attn_output.transpose(1, 2).contiguous()
394
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
395
+
396
+ attn_output = self.dense(attn_output)
397
+
398
+ if not output_attentions:
399
+ attn_weights = None
400
+
401
+ return attn_output, attn_weights, past_key_value
402
+
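Since the partial rotary split in `PhiAttention.forward` above is easy to miss, here is an illustrative sketch of the slicing it performs (toy dimensions; `rotary_emb.dim` corresponds to `int(partial_rotary_factor * head_dim)`):

```python
import torch

head_dim, partial_rotary_factor = 64, 0.5
rotary_dim = int(partial_rotary_factor * head_dim)       # 32, i.e. rotary_emb.dim

query_states = torch.randn(1, 8, 10, head_dim)
query_rot = query_states[..., :rotary_dim]               # gets rotary position embeddings
query_pass = query_states[..., rotary_dim:]              # passes through unchanged
query_states = torch.cat((query_rot, query_pass), dim=-1)  # recombined after RoPE is applied
```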
403
+
404
+ class PhiFlashAttention2(PhiAttention):
405
+ """
406
+ Phi flash attention module. This module inherits from `PhiAttention` as the weights of the module stay
407
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
408
+ flash attention and deal with padding tokens in case the input contains any of them.
409
+ """
410
+
411
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
412
+ def __init__(self, *args, **kwargs):
413
+ super().__init__(*args, **kwargs)
414
+
415
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
416
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
417
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
418
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
419
+
420
+ def forward(
421
+ self,
422
+ hidden_states: torch.Tensor,
423
+ attention_mask: Optional[torch.LongTensor] = None,
424
+ position_ids: Optional[torch.LongTensor] = None,
425
+ past_key_value: Optional[Cache] = None,
426
+ output_attentions: bool = False,
427
+ use_cache: bool = False,
428
+ **kwargs,
429
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
430
+ # PhiFlashAttention2 attention does not support output_attentions
431
+
432
+ output_attentions = False
433
+
434
+ bsz, q_len, _ = hidden_states.size()
435
+
436
+ query_states = self.q_proj(hidden_states)
437
+ key_states = self.k_proj(hidden_states)
438
+ value_states = self.v_proj(hidden_states)
439
+
440
+ if self.qk_layernorm:
441
+ query_states = self.q_layernorm(query_states)
442
+ key_states = self.k_layernorm(key_states)
443
+
444
+ # Flash attention requires the input to have the shape
445
+ # batch_size x seq_length x num_heads x head_dim
446
+ # therefore we just need to keep the original shape
447
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
448
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
449
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
450
+
451
+ kv_seq_len = key_states.shape[-2]
452
+ if past_key_value is not None:
453
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
454
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
455
+
456
+ # Partial rotary embedding
457
+ query_rot, query_pass = (
458
+ query_states[..., : self.rotary_emb.dim],
459
+ query_states[..., self.rotary_emb.dim :],
460
+ )
461
+ key_rot, key_pass = (
462
+ key_states[..., : self.rotary_emb.dim],
463
+ key_states[..., self.rotary_emb.dim :],
464
+ )
465
+ # [batch_size, seq_length, num_heads, head_dim // config.partial_rotary_factor]
466
+ query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
467
+
468
+ # [batch_size, seq_length, num_heads, head_dim]
469
+ query_states = torch.cat((query_rot, query_pass), dim=-1)
470
+ key_states = torch.cat((key_rot, key_pass), dim=-1)
471
+
472
+ if past_key_value is not None:
473
+ cache_kwargs = {"sin": sin, "cos": cos, "partial_rotation_size": self.rotary_emb.dim}
474
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
475
+
476
+ # TODO: These transposes are quite inefficient, but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
477
+ # to be able to avoid many of these transpose/reshape/view.
478
+ query_states = query_states.transpose(1, 2)
479
+ key_states = key_states.transpose(1, 2)
480
+ value_states = value_states.transpose(1, 2)
481
+
482
+ attn_dropout = self.attention_dropout if self.training else 0.0
483
+
484
+ # In PEFT, the layer norms are usually cast to float32 for training stability reasons,
485
+ # so the input hidden states get silently cast to float32. Hence, we need to
486
+ # cast them back to the correct dtype just to be sure everything works as expected.
487
+ # This might slow down training & inference, so it is recommended not to cast the LayerNorms
488
+ # to fp32.
489
+
490
+ if query_states.dtype == torch.float32:
491
+ if torch.is_autocast_enabled():
492
+ target_dtype = torch.get_autocast_gpu_dtype()
493
+ # Handle the case where the model is quantized
494
+ elif hasattr(self.config, "_pre_quantization_dtype"):
495
+ target_dtype = self.config._pre_quantization_dtype
496
+ else:
497
+ target_dtype = self.q_proj.weight.dtype
498
+
499
+ logger.warning_once(
500
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
501
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
502
+ f" {target_dtype}."
503
+ )
504
+
505
+ query_states = query_states.to(target_dtype)
506
+ key_states = key_states.to(target_dtype)
507
+ value_states = value_states.to(target_dtype)
508
+
509
+ attn_output = self._flash_attention_forward(
510
+ query_states, key_states, value_states, attention_mask, q_len, dropout=attn_dropout, softmax_scale=None
511
+ )
512
+
513
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
514
+ attn_output = self.dense(attn_output)
515
+
516
+ if not output_attentions:
517
+ attn_weights = None
518
+
519
+ return attn_output, attn_weights, past_key_value
520
+
521
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
522
+ def _flash_attention_forward(
523
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
524
+ ):
525
+ """
526
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
527
+ it first unpads the input, then computes the attention scores and pads the final attention scores.
528
+
529
+ Args:
530
+ query_states (`torch.Tensor`):
531
+ Input query states to be passed to Flash Attention API
532
+ key_states (`torch.Tensor`):
533
+ Input key states to be passed to Flash Attention API
534
+ value_states (`torch.Tensor`):
535
+ Input value states to be passed to Flash Attention API
536
+ attention_mask (`torch.Tensor`):
537
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
538
+ position of padding tokens and 1 for the position of non-padding tokens.
539
+ dropout (`float`):
540
+ Attention dropout
541
+ softmax_scale (`float`, *optional*):
542
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
543
+ """
544
+ if not self._flash_attn_uses_top_left_mask:
545
+ causal = self.is_causal
546
+ else:
547
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
548
+ causal = self.is_causal and query_length != 1
549
+
550
+ # Contains at least one padding token in the sequence
551
+ if attention_mask is not None:
552
+ batch_size = query_states.shape[0]
553
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
554
+ query_states, key_states, value_states, attention_mask, query_length
555
+ )
556
+
557
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
558
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
559
+
560
+ attn_output_unpad = flash_attn_varlen_func(
561
+ query_states,
562
+ key_states,
563
+ value_states,
564
+ cu_seqlens_q=cu_seqlens_q,
565
+ cu_seqlens_k=cu_seqlens_k,
566
+ max_seqlen_q=max_seqlen_in_batch_q,
567
+ max_seqlen_k=max_seqlen_in_batch_k,
568
+ dropout_p=dropout,
569
+ softmax_scale=softmax_scale,
570
+ causal=causal,
571
+ )
572
+
573
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
574
+ else:
575
+ attn_output = flash_attn_func(
576
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
577
+ )
578
+
579
+ return attn_output
580
+
581
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
582
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
583
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
584
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
585
+
586
+ key_layer = index_first_axis(
587
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
588
+ )
589
+ value_layer = index_first_axis(
590
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
591
+ )
592
+ if query_length == kv_seq_len:
593
+ query_layer = index_first_axis(
594
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
595
+ )
596
+ cu_seqlens_q = cu_seqlens_k
597
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
598
+ indices_q = indices_k
599
+ elif query_length == 1:
600
+ max_seqlen_in_batch_q = 1
601
+ cu_seqlens_q = torch.arange(
602
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
603
+ ) # There is a memcpy here, which is very bad.
604
+ indices_q = cu_seqlens_q[:-1]
605
+ query_layer = query_layer.squeeze(1)
606
+ else:
607
+ # The -q_len: slice assumes left padding.
608
+ attention_mask = attention_mask[:, -query_length:]
609
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
610
+
611
+ return (
612
+ query_layer,
613
+ key_layer,
614
+ value_layer,
615
+ indices_q,
616
+ (cu_seqlens_q, cu_seqlens_k),
617
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
618
+ )
619
+
620
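# Illustrative only (tiny hypothetical mask, not part of the file above): how the unpadding
# quantities consumed by flash-attn's varlen kernels relate to a 2D padding mask; this mirrors
# what the `_get_unpad_data` helper used by `_upad_input` computes.
import torch

attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)             # tensor([3, 2])
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()  # positions of real tokens
max_seqlen_in_batch = int(seqlens_in_batch.max())                            # 3
cu_seqlens = torch.nn.functional.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))  # tensor([0, 3, 5])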
+
621
+ class PhiSdpaAttention(PhiAttention):
622
+ """
623
+ SDPA attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
624
+ `PhiAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
625
+ the SDPA API.
626
+ """
627
+
628
+ def __init__(self, *args, **kwargs):
629
+ super().__init__(*args, **kwargs)
630
+ self.require_contiguous_qkv = version.parse(get_torch_version()) < version.parse("2.2.0")
631
+
632
+ # Adapted from PhiAttention.forward
633
+ def forward(
634
+ self,
635
+ hidden_states: torch.Tensor,
636
+ attention_mask: Optional[torch.Tensor] = None,
637
+ position_ids: Optional[torch.LongTensor] = None,
638
+ past_key_value: Optional[Cache] = None,
639
+ output_attentions: bool = False,
640
+ use_cache: bool = False,
641
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
642
+ if output_attentions:
643
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
644
+ logger.warning_once(
645
+ "PhiModel is using PhiSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not "
646
+ "support `output_attentions=True`. Falling back to the manual attention implementation, but specifying "
647
+ "the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can "
648
+ 'be removed using the argument `attn_implementation="eager"` when loading the model.'
649
+ )
650
+ return super().forward(
651
+ hidden_states=hidden_states,
652
+ attention_mask=attention_mask,
653
+ position_ids=position_ids,
654
+ past_key_value=past_key_value,
655
+ output_attentions=output_attentions,
656
+ use_cache=use_cache,
657
+ )
658
+
659
+ bsz, q_len, _ = hidden_states.size()
660
+
661
+ query_states = self.q_proj(hidden_states)
662
+ key_states = self.k_proj(hidden_states)
663
+ value_states = self.v_proj(hidden_states)
664
+
665
+ if self.qk_layernorm:
666
+ query_states = self.q_layernorm(query_states)
667
+ key_states = self.k_layernorm(key_states)
668
+
669
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
670
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
671
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
672
+
673
+ kv_seq_len = key_states.shape[-2]
674
+ if past_key_value is not None:
675
+ if self.layer_idx is None:
676
+ raise ValueError(
677
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
678
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
679
+ "with a layer index."
680
+ )
681
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
682
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
683
+
684
+ # Partial rotary embedding
685
+ query_rot, query_pass = (
686
+ query_states[..., : self.rotary_emb.dim],
687
+ query_states[..., self.rotary_emb.dim :],
688
+ )
689
+ key_rot, key_pass = (
690
+ key_states[..., : self.rotary_emb.dim],
691
+ key_states[..., self.rotary_emb.dim :],
692
+ )
693
+ # [batch_size, seq_length, num_heads, head_dim // config.partial_rotary_factor]
694
+ query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
695
+
696
+ # [batch_size, seq_length, num_heads, head_dim]
697
+ query_states = torch.cat((query_rot, query_pass), dim=-1)
698
+ key_states = torch.cat((key_rot, key_pass), dim=-1)
699
+
700
+ if past_key_value is not None:
701
+ cache_kwargs = {"sin": sin, "cos": cos, "partial_rotation_size": self.rotary_emb.dim}
702
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
703
+
704
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
705
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
706
+
707
+ # SDPA with memory-efficient backend is broken in torch==2.1.2 when using non-contiguous inputs and a custom
708
+ # attn_mask, so we need to call `.contiguous()` here. This was fixed in torch==2.2.0.
709
+ # Reference: https://github.com/pytorch/pytorch/issues/112577
710
+ if self.require_contiguous_qkv and query_states.device.type == "cuda" and attention_mask is not None:
711
+ query_states = query_states.contiguous()
712
+ key_states = key_states.contiguous()
713
+ value_states = value_states.contiguous()
714
+
715
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
716
+ query_states,
717
+ key_states,
718
+ value_states,
719
+ attn_mask=attention_mask,
720
+ dropout_p=self.attention_dropout if self.training else 0.0,
721
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
722
+ )
723
+
724
+ attn_output = attn_output.transpose(1, 2).contiguous()
725
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
726
+
727
+ attn_output = self.dense(attn_output)
728
+
729
+ return attn_output, None, past_key_value
730
+
731
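# A minimal sketch (hypothetical shapes, not part of the file above) of the SDPA call the class
# above delegates to; as in its forward pass, `is_causal=True` is only used when no explicit
# mask is supplied and the query length is greater than 1.
import torch
import torch.nn.functional as F

bsz, num_heads, q_len, head_dim = 1, 2, 4, 8
q = torch.randn(bsz, num_heads, q_len, head_dim)
k = torch.randn_like(q)
v = torch.randn_like(q)
out = F.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=True)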
+
732
+ PHI_ATTENTION_CLASSES = {
733
+ "eager": PhiAttention,
734
+ "flash_attention_2": PhiFlashAttention2,
735
+ "sdpa": PhiSdpaAttention,
736
+ }
737
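# Hedged usage sketch (not part of the file above): the key used to index this mapping comes from
# `config._attn_implementation`, which `from_pretrained` populates from its public
# `attn_implementation` argument. The checkpoint name and dtype below are illustrative.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "microsoft/phi-1",
    torch_dtype=torch.float16,
    attn_implementation="sdpa",  # or "eager" / "flash_attention_2"
)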
+
738
+
739
+ class PhiDecoderLayer(nn.Module):
740
+ def __init__(self, config: PhiConfig, layer_idx: int):
741
+ super().__init__()
742
+ self.self_attn = PHI_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx)
743
+ self.mlp = PhiMLP(config)
744
+ self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
745
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
746
+
747
+ def forward(
748
+ self,
749
+ hidden_states: torch.Tensor,
750
+ attention_mask: Optional[torch.Tensor] = None,
751
+ position_ids: Optional[torch.LongTensor] = None,
752
+ output_attentions: Optional[bool] = False,
753
+ use_cache: Optional[bool] = False,
754
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
755
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
756
+ """
757
+ Args:
758
+ hidden_states (`torch.FloatTensor`):
759
+ input to the layer of shape `(batch, seq_len, embed_dim)`
760
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
761
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
762
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
763
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
764
+ `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
765
+ output_attentions (`bool`, *optional*):
766
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
767
+ returned tensors for more detail.
768
+ use_cache (`bool`, *optional*):
769
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
770
+ (see `past_key_values`).
771
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
772
+ """
773
+
774
+ residual = hidden_states
775
+
776
+ hidden_states = self.input_layernorm(hidden_states)
777
+
778
+ # Self Attention
779
+ attn_outputs, self_attn_weights, present_key_value = self.self_attn(
780
+ hidden_states=hidden_states,
781
+ attention_mask=attention_mask,
782
+ position_ids=position_ids,
783
+ past_key_value=past_key_value,
784
+ output_attentions=output_attentions,
785
+ use_cache=use_cache,
786
+ )
787
+ attn_outputs = self.resid_dropout(attn_outputs)
788
+
789
+ feed_forward_hidden_states = self.resid_dropout(self.mlp(hidden_states))
790
+ hidden_states = attn_outputs + feed_forward_hidden_states + residual
791
+ outputs = (hidden_states,)
792
+
793
+ if output_attentions:
794
+ outputs += (self_attn_weights,)
795
+
796
+ if use_cache:
797
+ outputs += (present_key_value,)
798
+
799
+ return outputs
800
+
801
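# Sketch of the parallel residual pattern implemented by `PhiDecoderLayer.forward` above:
# attention and MLP both consume the same layer-normed input, and a single residual is added.
# The modules below are stand-ins with hypothetical sizes, purely for illustration.
import torch
import torch.nn as nn

hidden_states = torch.randn(1, 4, 16)
input_layernorm, attn_branch, mlp_branch = nn.LayerNorm(16), nn.Linear(16, 16), nn.Linear(16, 16)
normed = input_layernorm(hidden_states)
hidden_states = attn_branch(normed) + mlp_branch(normed) + hidden_states  # both branches share one residual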
+
802
+ PHI_START_DOCSTRING = r"""
803
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
804
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
805
+ etc.)
806
+
807
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
808
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
809
+ and behavior.
810
+
811
+ Parameters:
812
+ config ([`PhiConfig`]):
813
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
814
+ load the weights associated with the model, only the configuration. Check out the
815
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
816
+ """
817
+
818
+
819
+ @add_start_docstrings(
820
+ "The bare Phi Model outputting raw hidden-states without any specific head on top.",
821
+ PHI_START_DOCSTRING,
822
+ )
823
+ class PhiPreTrainedModel(PreTrainedModel):
824
+ config_class = PhiConfig
825
+ base_model_prefix = "model"
826
+ supports_gradient_checkpointing = True
827
+ _no_split_modules = ["PhiDecoderLayer"]
828
+ _skip_keys_device_placement = "past_key_values"
829
+ _supports_flash_attn_2 = True
830
+ _supports_sdpa = True
831
+ _supports_cache_class = True
832
+
833
+ def _init_weights(self, module):
834
+ std = self.config.initializer_range
835
+ if isinstance(module, nn.Linear):
836
+ module.weight.data.normal_(mean=0.0, std=std)
837
+ if module.bias is not None:
838
+ module.bias.data.zero_()
839
+ elif isinstance(module, nn.Embedding):
840
+ module.weight.data.normal_(mean=0.0, std=std)
841
+ if module.padding_idx is not None:
842
+ module.weight.data[module.padding_idx].zero_()
843
+
844
+
845
+ PHI_INPUTS_DOCSTRING = r"""
846
+ Args:
847
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
848
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
849
+ it.
850
+
851
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
852
+ [`PreTrainedTokenizer.__call__`] for details.
853
+
854
+ [What are input IDs?](../glossary#input-ids)
855
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
856
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
857
+
858
+ - 1 for tokens that are **not masked**,
859
+ - 0 for tokens that are **masked**.
860
+
861
+ [What are attention masks?](../glossary#attention-mask)
862
+
863
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
864
+ [`PreTrainedTokenizer.__call__`] for details.
865
+
866
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
867
+ `past_key_values`).
868
+
869
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
870
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
871
+ information on the default strategy.
872
+
873
+ - 1 indicates the head is **not masked**,
874
+ - 0 indicates the head is **masked**.
875
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
876
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
877
+ config.n_positions - 1]`.
878
+
879
+ [What are position IDs?](../glossary#position-ids)
880
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
881
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
882
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
883
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
884
+
885
+ Two formats are allowed:
886
+ - a [`~cache_utils.Cache`] instance;
887
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
888
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
889
+ cache format.
890
+
891
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
892
+ legacy cache format will be returned.
893
+
894
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
895
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
896
+ of shape `(batch_size, sequence_length)`.
897
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
898
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
899
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
900
+ model's internal embedding lookup matrix.
901
+ use_cache (`bool`, *optional*):
902
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
903
+ `past_key_values`).
904
+ output_attentions (`bool`, *optional*):
905
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
906
+ tensors for more detail.
907
+ output_hidden_states (`bool`, *optional*):
908
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
909
+ more detail.
910
+ return_dict (`bool`, *optional*):
911
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
912
+ """
913
+
914
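# Sketch of the two cache formats described in the docstring above (hypothetical shapes;
# assumes transformers' public `DynamicCache`, which the model uses internally).
import torch
from transformers import DynamicCache

legacy = tuple(
    (torch.zeros(1, 2, 3, 8), torch.zeros(1, 2, 3, 8))  # (key, value) per layer
    for _ in range(2)
)
cache = DynamicCache.from_legacy_cache(legacy)  # `Cache` instance
legacy_again = cache.to_legacy_cache()          # back to the tuple-of-tuples format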
+
915
+ @add_start_docstrings(
916
+ "The bare Phi Model outputting raw hidden-states without any specific head on top.",
917
+ PHI_START_DOCSTRING,
918
+ )
919
+ class PhiModel(PhiPreTrainedModel):
920
+ """
921
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`PhiDecoderLayer`]
922
+
923
+ Args:
924
+ config: PhiConfig
925
+ """
926
+
927
+ def __init__(self, config: PhiConfig):
928
+ super().__init__(config)
929
+ self.padding_idx = config.pad_token_id
930
+ self.vocab_size = config.vocab_size
931
+
932
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
933
+ self.embed_dropout = nn.Dropout(config.embd_pdrop)
934
+ self.layers = nn.ModuleList(
935
+ [PhiDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
936
+ )
937
+ self.final_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
938
+
939
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
940
+ self._use_sdpa = config._attn_implementation == "sdpa"
941
+
942
+ self.gradient_checkpointing = False
943
+ # Initialize weights and apply final processing
944
+ self.post_init()
945
+
946
+ def get_input_embeddings(self):
947
+ return self.embed_tokens
948
+
949
+ def set_input_embeddings(self, value):
950
+ self.embed_tokens = value
951
+
952
+ @add_start_docstrings_to_model_forward(PHI_INPUTS_DOCSTRING)
953
+ def forward(
954
+ self,
955
+ input_ids: torch.LongTensor = None,
956
+ attention_mask: Optional[torch.Tensor] = None,
957
+ position_ids: Optional[torch.LongTensor] = None,
958
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
959
+ inputs_embeds: Optional[torch.FloatTensor] = None,
960
+ use_cache: Optional[bool] = None,
961
+ output_attentions: Optional[bool] = None,
962
+ output_hidden_states: Optional[bool] = None,
963
+ return_dict: Optional[bool] = None,
964
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
965
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
966
+ output_hidden_states = (
967
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
968
+ )
969
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
970
+
971
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
972
+
973
+ # retrieve input_ids and inputs_embeds
974
+ if input_ids is not None and inputs_embeds is not None:
975
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
976
+ elif input_ids is not None:
977
+ batch_size, seq_length = input_ids.shape[:2]
978
+ elif inputs_embeds is not None:
979
+ batch_size, seq_length = inputs_embeds.shape[:2]
980
+ else:
981
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
982
+
983
+ past_key_values_length = 0
984
+
985
+ if self.gradient_checkpointing and self.training:
986
+ if use_cache:
987
+ logger.warning_once(
988
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
989
+ )
990
+ use_cache = False
991
+
992
+ if use_cache:
993
+ use_legacy_cache = not isinstance(past_key_values, Cache)
994
+ if use_legacy_cache:
995
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
996
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
997
+
998
+ if position_ids is None:
999
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1000
+ position_ids = torch.arange(
1001
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
1002
+ )
1003
+ position_ids = position_ids.unsqueeze(0)
1004
+
1005
+ if inputs_embeds is None:
1006
+ inputs_embeds = self.embed_tokens(input_ids)
1007
+
1008
+ inputs_embeds = self.embed_dropout(inputs_embeds)
1009
+
1010
+ # Attention mask.
1011
+ if self._use_flash_attention_2:
1012
+ # 2d mask is passed through the layers
1013
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
1014
+ elif self._use_sdpa and not output_attentions:
1015
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
1016
+ attention_mask,
1017
+ (batch_size, seq_length),
1018
+ inputs_embeds,
1019
+ past_key_values_length,
1020
+ )
1021
+ else:
1022
+ # 4d mask is passed through the layers
1023
+ attention_mask = _prepare_4d_causal_attention_mask(
1024
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
1025
+ )
1026
+
1027
+ hidden_states = inputs_embeds
1028
+
1029
+ # decoder layers
1030
+ all_hidden_states = () if output_hidden_states else None
1031
+ all_self_attns = () if output_attentions else None
1032
+ next_decoder_cache = None
1033
+
1034
+ for decoder_layer in self.layers:
1035
+ if output_hidden_states:
1036
+ all_hidden_states += (hidden_states,)
1037
+
1038
+ if self.gradient_checkpointing and self.training:
1039
+ layer_outputs = self._gradient_checkpointing_func(
1040
+ decoder_layer.__call__,
1041
+ hidden_states,
1042
+ attention_mask,
1043
+ position_ids,
1044
+ past_key_values,
1045
+ output_attentions,
1046
+ )
1047
+ else:
1048
+ layer_outputs = decoder_layer(
1049
+ hidden_states,
1050
+ attention_mask=attention_mask,
1051
+ position_ids=position_ids,
1052
+ past_key_value=past_key_values,
1053
+ output_attentions=output_attentions,
1054
+ use_cache=use_cache,
1055
+ )
1056
+
1057
+ hidden_states = layer_outputs[0]
1058
+
1059
+ if use_cache:
1060
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1061
+
1062
+ if output_attentions:
1063
+ all_self_attns += (layer_outputs[1],)
1064
+
1065
+ hidden_states = self.final_layernorm(hidden_states)
1066
+
1067
+ # add hidden states from the last decoder layer
1068
+ if output_hidden_states:
1069
+ all_hidden_states += (hidden_states,)
1070
+
1071
+ next_cache = None
1072
+ if use_cache:
1073
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
1074
+ if not return_dict:
1075
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1076
+ return BaseModelOutputWithPast(
1077
+ last_hidden_state=hidden_states,
1078
+ past_key_values=next_cache,
1079
+ hidden_states=all_hidden_states,
1080
+ attentions=all_self_attns,
1081
+ )
1082
+
1083
+
1084
+ class PhiForCausalLM(PhiPreTrainedModel):
1085
+ _tied_weights_keys = ["lm_head.weight"]
1086
+
1087
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with Llama->Phi,bias=False->bias=True
1088
+ def __init__(self, config):
1089
+ super().__init__(config)
1090
+ self.model = PhiModel(config)
1091
+ self.vocab_size = config.vocab_size
1092
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=True)
1093
+
1094
+ # Initialize weights and apply final processing
1095
+ self.post_init()
1096
+
1097
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_input_embeddings
1098
+ def get_input_embeddings(self):
1099
+ return self.model.embed_tokens
1100
+
1101
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_input_embeddings
1102
+ def set_input_embeddings(self, value):
1103
+ self.model.embed_tokens = value
1104
+
1105
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_output_embeddings
1106
+ def get_output_embeddings(self):
1107
+ return self.lm_head
1108
+
1109
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_output_embeddings
1110
+ def set_output_embeddings(self, new_embeddings):
1111
+ self.lm_head = new_embeddings
1112
+
1113
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder
1114
+ def set_decoder(self, decoder):
1115
+ self.model = decoder
1116
+
1117
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder
1118
+ def get_decoder(self):
1119
+ return self.model
1120
+
1121
+ @add_start_docstrings_to_model_forward(PHI_INPUTS_DOCSTRING)
1122
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1123
+ def forward(
1124
+ self,
1125
+ input_ids: torch.LongTensor = None,
1126
+ attention_mask: Optional[torch.Tensor] = None,
1127
+ position_ids: Optional[torch.LongTensor] = None,
1128
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1129
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1130
+ labels: Optional[torch.LongTensor] = None,
1131
+ use_cache: Optional[bool] = None,
1132
+ output_attentions: Optional[bool] = None,
1133
+ output_hidden_states: Optional[bool] = None,
1134
+ return_dict: Optional[bool] = None,
1135
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1136
+ r"""
1137
+ Args:
1138
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1139
+ Labels for computing the language modeling loss. Indices should either be in `[0, ...,
1140
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1141
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1142
+
1143
+ Returns:
1144
+
1145
+ Example:
1146
+
1147
+ ```python
1148
+ >>> from transformers import AutoTokenizer, PhiForCausalLM
1149
+
1150
+ >>> model = PhiForCausalLM.from_pretrained("microsoft/phi-1")
1151
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-1")
1152
+
1153
+ >>> prompt = "This is an example script ."
1154
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1155
+
1156
+ >>> # Generate
1157
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1158
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1159
+ 'This is an example script .\n\n\n\nfrom typing import List\n\ndef find_most_common_letter(words: List[str'
1160
+ ```"""
1161
+
1162
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1163
+ output_hidden_states = (
1164
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1165
+ )
1166
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1167
+
1168
+ # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
1169
+ outputs = self.model(
1170
+ input_ids=input_ids,
1171
+ attention_mask=attention_mask,
1172
+ position_ids=position_ids,
1173
+ past_key_values=past_key_values,
1174
+ inputs_embeds=inputs_embeds,
1175
+ use_cache=use_cache,
1176
+ output_attentions=output_attentions,
1177
+ output_hidden_states=output_hidden_states,
1178
+ return_dict=return_dict,
1179
+ )
1180
+
1181
+ hidden_states = outputs[0]
1182
+ logits = self.lm_head(hidden_states)
1183
+ logits = logits.float()
1184
+
1185
+ loss = None
1186
+ if labels is not None:
1187
+ # Shift so that tokens < n predict n
1188
+ shift_logits = logits[..., :-1, :].contiguous()
1189
+ shift_labels = labels[..., 1:].contiguous()
1190
+ # Flatten the tokens
1191
+ loss_fct = CrossEntropyLoss()
1192
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1193
+ shift_labels = shift_labels.view(-1)
1194
+ # Enable model parallelism
1195
+ shift_labels = shift_labels.to(shift_logits.device)
1196
+ loss = loss_fct(shift_logits, shift_labels)
1197
+
1198
+ if not return_dict:
1199
+ output = (logits,) + outputs[1:]
1200
+ return (loss,) + output if loss is not None else output
1201
+
1202
+ return CausalLMOutputWithPast(
1203
+ loss=loss,
1204
+ logits=logits,
1205
+ past_key_values=outputs.past_key_values,
1206
+ hidden_states=outputs.hidden_states,
1207
+ attentions=outputs.attentions,
1208
+ )
1209
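# Minimal sketch (hypothetical tensors, not part of the file above) of the causal-LM label shift
# used in the loss computation of `forward`: logits at position t are trained to predict the
# token at position t + 1.
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 10
logits = torch.randn(1, 5, vocab_size)
labels = torch.randint(0, vocab_size, (1, 5))
shift_logits = logits[..., :-1, :].contiguous().view(-1, vocab_size)
shift_labels = labels[..., 1:].contiguous().view(-1)
loss = CrossEntropyLoss()(shift_logits, shift_labels)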
+
1210
+ # Copied from transformers.models.persimmon.modeling_persimmon.PersimmonForCausalLM.prepare_inputs_for_generation
1211
+ def prepare_inputs_for_generation(
1212
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1213
+ ):
1214
+ if past_key_values is not None:
1215
+ if isinstance(past_key_values, Cache):
1216
+ cache_length = past_key_values.get_seq_length()
1217
+ past_length = past_key_values.seen_tokens
1218
+ max_cache_length = past_key_values.get_max_length()
1219
+ else:
1220
+ cache_length = past_length = past_key_values[0][0].shape[2]
1221
+ max_cache_length = None
1222
+
1223
+ # Keep only the unprocessed tokens:
1224
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1225
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing inputs_embeds as
1226
+ # input)
1227
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1228
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1229
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1230
+ # input_ids based on the past_length.
1231
+ elif past_length < input_ids.shape[1]:
1232
+ input_ids = input_ids[:, past_length:]
1233
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1234
+
1235
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1236
+ if (
1237
+ max_cache_length is not None
1238
+ and attention_mask is not None
1239
+ and cache_length + input_ids.shape[1] > max_cache_length
1240
+ ):
1241
+ attention_mask = attention_mask[:, -max_cache_length:]
1242
+
1243
+ position_ids = kwargs.get("position_ids", None)
1244
+ if attention_mask is not None and position_ids is None:
1245
+ # create position_ids on the fly for batch generation
1246
+ position_ids = attention_mask.long().cumsum(-1) - 1
1247
+ position_ids.masked_fill_(attention_mask == 0, 1)
1248
+ if past_key_values:
1249
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1250
+
1251
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1252
+ if inputs_embeds is not None and past_key_values is None:
1253
+ model_inputs = {"inputs_embeds": inputs_embeds}
1254
+ else:
1255
+ model_inputs = {"input_ids": input_ids}
1256
+
1257
+ model_inputs.update(
1258
+ {
1259
+ "position_ids": position_ids,
1260
+ "past_key_values": past_key_values,
1261
+ "use_cache": kwargs.get("use_cache"),
1262
+ "attention_mask": attention_mask,
1263
+ }
1264
+ )
1265
+ return model_inputs
1266
+
1267
+ @staticmethod
1268
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM._reorder_cache
1269
+ def _reorder_cache(past_key_values, beam_idx):
1270
+ reordered_past = ()
1271
+ for layer_past in past_key_values:
1272
+ reordered_past += (
1273
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1274
+ )
1275
+ return reordered_past
1276
+
1277
+
1278
+ @add_start_docstrings(
1279
+ """
1280
+ The PhiModel with a sequence classification head on top (linear layer).
1281
+
1282
+ [`PhiForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1283
+ (e.g. GPT-2) do.
1284
+
1285
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1286
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1287
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1288
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1289
+ each row of the batch).
1290
+ """,
1291
+ PHI_START_DOCSTRING,
1292
+ )
1293
+ # Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with LLAMA->PHI,Llama->Phi with self.transformer->self.model, transformer_outputs->model_outputs
1294
+ class PhiForSequenceClassification(PhiPreTrainedModel):
1295
+ def __init__(self, config):
1296
+ super().__init__(config)
1297
+ self.num_labels = config.num_labels
1298
+ self.model = PhiModel(config)
1299
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1300
+
1301
+ # Initialize weights and apply final processing
1302
+ self.post_init()
1303
+
1304
+ def get_input_embeddings(self):
1305
+ return self.model.embed_tokens
1306
+
1307
+ def set_input_embeddings(self, value):
1308
+ self.model.embed_tokens = value
1309
+
1310
+ @add_start_docstrings_to_model_forward(PHI_INPUTS_DOCSTRING)
1311
+ def forward(
1312
+ self,
1313
+ input_ids: torch.LongTensor = None,
1314
+ attention_mask: Optional[torch.Tensor] = None,
1315
+ position_ids: Optional[torch.LongTensor] = None,
1316
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1317
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1318
+ labels: Optional[torch.LongTensor] = None,
1319
+ use_cache: Optional[bool] = None,
1320
+ output_attentions: Optional[bool] = None,
1321
+ output_hidden_states: Optional[bool] = None,
1322
+ return_dict: Optional[bool] = None,
1323
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1324
+ r"""
1325
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1326
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1327
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1328
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1329
+ """
1330
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1331
+
1332
+ model_outputs = self.model(
1333
+ input_ids,
1334
+ attention_mask=attention_mask,
1335
+ position_ids=position_ids,
1336
+ past_key_values=past_key_values,
1337
+ inputs_embeds=inputs_embeds,
1338
+ use_cache=use_cache,
1339
+ output_attentions=output_attentions,
1340
+ output_hidden_states=output_hidden_states,
1341
+ return_dict=return_dict,
1342
+ )
1343
+ hidden_states = model_outputs[0]
1344
+ logits = self.score(hidden_states)
1345
+
1346
+ if input_ids is not None:
1347
+ batch_size = input_ids.shape[0]
1348
+ else:
1349
+ batch_size = inputs_embeds.shape[0]
1350
+
1351
+ if self.config.pad_token_id is None and batch_size != 1:
1352
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1353
+ if self.config.pad_token_id is None:
1354
+ sequence_lengths = -1
1355
+ else:
1356
+ if input_ids is not None:
1357
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1358
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1359
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1360
+ sequence_lengths = sequence_lengths.to(logits.device)
1361
+ else:
1362
+ sequence_lengths = -1
1363
+
1364
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1365
+
1366
+ loss = None
1367
+ if labels is not None:
1368
+ labels = labels.to(logits.device)
1369
+ if self.config.problem_type is None:
1370
+ if self.num_labels == 1:
1371
+ self.config.problem_type = "regression"
1372
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1373
+ self.config.problem_type = "single_label_classification"
1374
+ else:
1375
+ self.config.problem_type = "multi_label_classification"
1376
+
1377
+ if self.config.problem_type == "regression":
1378
+ loss_fct = MSELoss()
1379
+ if self.num_labels == 1:
1380
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1381
+ else:
1382
+ loss = loss_fct(pooled_logits, labels)
1383
+ elif self.config.problem_type == "single_label_classification":
1384
+ loss_fct = CrossEntropyLoss()
1385
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1386
+ elif self.config.problem_type == "multi_label_classification":
1387
+ loss_fct = BCEWithLogitsLoss()
1388
+ loss = loss_fct(pooled_logits, labels)
1389
+ if not return_dict:
1390
+ output = (pooled_logits,) + model_outputs[1:]
1391
+ return ((loss,) + output) if loss is not None else output
1392
+
1393
+ return SequenceClassifierOutputWithPast(
1394
+ loss=loss,
1395
+ logits=pooled_logits,
1396
+ past_key_values=model_outputs.past_key_values,
1397
+ hidden_states=model_outputs.hidden_states,
1398
+ attentions=model_outputs.attentions,
1399
+ )
1400
+
1401
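# Illustration (hypothetical ids, pad_token_id assumed to be 0) of the "last non-padding token"
# selection performed by `PhiForSequenceClassification.forward` above; the modulo makes rows
# without any padding fall back to the final position, as the ONNX-compatibility comment notes.
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0],
                          [3, 4, 0, 0, 0]])
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1  # tensor([2, 1])
sequence_lengths = sequence_lengths % input_ids.shape[-1]                  # unchanged here; wraps -1 when no pad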
+
1402
+ @add_start_docstrings(
1403
+ """
1404
+ PhiModel with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1405
+ Named-Entity-Recognition (NER) tasks.
1406
+ """,
1407
+ PHI_START_DOCSTRING,
1408
+ )
1409
+ # Copied from transformers.models.mpt.modeling_mpt.MptForTokenClassification with MPT->PHI,Mpt->Phi,self.transformer->self.model,transformer_outputs->model_outputs
1410
+ class PhiForTokenClassification(PhiPreTrainedModel):
1411
+ def __init__(self, config: PhiConfig):
1412
+ super().__init__(config)
1413
+ self.num_labels = config.num_labels
1414
+
1415
+ self.model = PhiModel(config)
1416
+ if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
1417
+ classifier_dropout = config.classifier_dropout
1418
+ elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
1419
+ classifier_dropout = config.hidden_dropout
1420
+ else:
1421
+ classifier_dropout = 0.1
1422
+ self.dropout = nn.Dropout(classifier_dropout)
1423
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1424
+
1425
+ # Initialize weights and apply final processing
1426
+ self.post_init()
1427
+
1428
+ @add_start_docstrings_to_model_forward(PHI_INPUTS_DOCSTRING)
1429
+ @add_code_sample_docstrings(
1430
+ checkpoint=_CHECKPOINT_FOR_DOC,
1431
+ output_type=TokenClassifierOutput,
1432
+ config_class=_CONFIG_FOR_DOC,
1433
+ )
1434
+ def forward(
1435
+ self,
1436
+ input_ids: Optional[torch.LongTensor] = None,
1437
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
1438
+ attention_mask: Optional[torch.Tensor] = None,
1439
+ inputs_embeds: Optional[torch.Tensor] = None,
1440
+ labels: Optional[torch.Tensor] = None,
1441
+ use_cache: Optional[bool] = None,
1442
+ output_attentions: Optional[bool] = None,
1443
+ output_hidden_states: Optional[bool] = None,
1444
+ return_dict: Optional[bool] = None,
1445
+ **deprecated_arguments,
1446
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1447
+ r"""
1448
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1449
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1450
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1451
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1452
+ """
1453
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1454
+
1455
+ model_outputs = self.model(
1456
+ input_ids,
1457
+ past_key_values=past_key_values,
1458
+ attention_mask=attention_mask,
1459
+ inputs_embeds=inputs_embeds,
1460
+ use_cache=use_cache,
1461
+ output_attentions=output_attentions,
1462
+ output_hidden_states=output_hidden_states,
1463
+ return_dict=return_dict,
1464
+ )
1465
+
1466
+ hidden_states = model_outputs[0]
1467
+ hidden_states = self.dropout(hidden_states)
1468
+ logits = self.classifier(hidden_states)
1469
+
1470
+ loss = None
1471
+ if labels is not None:
1472
+ # move labels to correct device to enable model parallelism
1473
+ labels = labels.to(logits.device)
1474
+ batch_size, seq_length = labels.shape
1475
+ loss_fct = CrossEntropyLoss()
1476
+ loss = loss_fct(
1477
+ logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
1478
+ )
1479
+
1480
+ if not return_dict:
1481
+ output = (logits,) + model_outputs[2:]
1482
+ return ((loss,) + output) if loss is not None else output
1483
+
1484
+ return TokenClassifierOutput(
1485
+ loss=loss,
1486
+ logits=logits,
1487
+ hidden_states=model_outputs.hidden_states,
1488
+ attentions=model_outputs.attentions,
1489
+ )
llmeval-env/lib/python3.10/site-packages/transformers/models/speech_encoder_decoder/__init__.py ADDED
@@ -0,0 +1,60 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
18
+
19
+
20
+ _import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
21
+
22
+ try:
23
+ if not is_torch_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
29
+
30
+ try:
31
+ if not is_flax_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
37
+
38
+ if TYPE_CHECKING:
39
+ from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
40
+
41
+ try:
42
+ if not is_torch_available():
43
+ raise OptionalDependencyNotAvailable()
44
+ except OptionalDependencyNotAvailable:
45
+ pass
46
+ else:
47
+ from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
48
+
49
+ try:
50
+ if not is_flax_available():
51
+ raise OptionalDependencyNotAvailable()
52
+ except OptionalDependencyNotAvailable:
53
+ pass
54
+ else:
55
+ from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
56
+
57
+ else:
58
+ import sys
59
+
60
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
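# Hedged usage sketch (not part of the file above): with the `_LazyModule` indirection set up
# here, the heavy submodules are only imported when an attribute is first accessed.
from transformers.models import speech_encoder_decoder

config_cls = speech_encoder_decoder.SpeechEncoderDecoderConfig  # triggers the real import
model_cls = speech_encoder_decoder.SpeechEncoderDecoderModel    # resolvable only if torch is installed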