applied-ai-018 committed
Commit 7b2af3c · verified · 1 Parent(s): d315ee6

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/__init__.py +148 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/configuration_mbart.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/convert_mbart_original_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_flax_mbart.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_mbart.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_tf_mbart.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/tokenization_mbart.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/tokenization_mbart_fast.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/configuration_mbart.py +386 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py +83 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/modeling_flax_mbart.py +1771 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/modeling_mbart.py +0 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/modeling_tf_mbart.py +1573 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/tokenization_mbart.py +337 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/tokenization_mbart_fast.py +270 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/megatron_bert/__init__.py +69 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/megatron_bert/__pycache__/__init__.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/megatron_bert/__pycache__/configuration_megatron_bert.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/megatron_bert/__pycache__/convert_megatron_bert_checkpoint.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/megatron_bert/__pycache__/modeling_megatron_bert.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/megatron_bert/configuration_megatron_bert.py +129 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py +334 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/megatron_bert/modeling_megatron_bert.py +1836 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__init__.py +85 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/__init__.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/configuration_mobilenet_v1.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/convert_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/feature_extraction_mobilenet_v1.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/image_processing_mobilenet_v1.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/modeling_mobilenet_v1.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py +126 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/convert_original_tf_checkpoint_to_pytorch.py +142 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/feature_extraction_mobilenet_v1.py +33 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py +326 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py +482 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/mpt/__init__.py +62 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/mpt/__pycache__/__init__.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/mpt/__pycache__/configuration_mpt.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/mpt/__pycache__/modeling_mpt.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/mpt/configuration_mpt.py +246 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/mpt/modeling_mpt.py +942 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/__init__.py +67 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/convert_musicgen_transformers.py +235 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/processing_musicgen.py +140 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__init__.py +59 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__pycache__/__init__.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__pycache__/configuration_recurrent_gemma.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__pycache__/convert_recurrent_gemma_to_hf.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__pycache__/modeling_recurrent_gemma.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/__init__.py ADDED
@@ -0,0 +1,148 @@

# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
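The `_LazyModule` indirection in the file above means the framework-specific submodules (and their heavy dependencies) are only imported when one of their symbols is first touched. A minimal sketch of what this enables, assuming the optional PyTorch backend is installed; the configuration values are illustrative, not from the commit:

```python
# Sketch only: these names resolve through the _LazyModule defined above, so
# modeling_mbart (and torch) are loaded lazily on first access.
from transformers import MBartConfig, MBartForConditionalGeneration

# Small illustrative config so the randomly initialized model is cheap to build.
config = MBartConfig(
    encoder_layers=2,
    decoder_layers=2,
    d_model=256,
    encoder_attention_heads=4,
    decoder_attention_heads=4,
    encoder_ffn_dim=512,
    decoder_ffn_dim=512,
)
model = MBartForConditionalGeneration(config)  # random weights, no download needed
print(model.config.model_type)  # "mbart"
```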
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.13 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/configuration_mbart.cpython-310.pyc ADDED
Binary file (12.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/convert_mbart_original_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (2.31 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_flax_mbart.cpython-310.pyc ADDED
Binary file (48.9 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_mbart.cpython-310.pyc ADDED
Binary file (66.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_tf_mbart.cpython-310.pyc ADDED
Binary file (51.4 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/tokenization_mbart.cpython-310.pyc ADDED
Binary file (12.3 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/tokenization_mbart_fast.cpython-310.pyc ADDED
Binary file (9.23 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/configuration_mbart.py ADDED
@@ -0,0 +1,386 @@

# coding=utf-8
# Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" MBART model configuration"""
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)


class MBartConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MBartModel`]. It is used to instantiate an MBART
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the MBART
    [facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 50265):
            Vocabulary size of the MBART model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`MBartModel`] or [`TFMBartModel`].
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        classifier_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for classifier.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by diving by sqrt(d_model).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models)
        forced_eos_token_id (`int`, *optional*, defaults to 2):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.

    Example:

    ```python
    >>> from transformers import MBartConfig, MBartModel

    >>> # Initializing a MBART facebook/mbart-large-cc25 style configuration
    >>> configuration = MBartConfig()

    >>> # Initializing a model (with random weights) from the facebook/mbart-large-cc25 style configuration
    >>> model = MBartModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "mbart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig with Bart->MBart
class MBartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
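Beyond the docstring example, the `attribute_map` defined in this configuration aliases the generic `hidden_size` and `num_attention_heads` names onto MBart's `d_model` and `encoder_attention_heads`, and `__init__` mirrors `encoder_layers` into `num_hidden_layers`. A small sketch using only the defaults shown above:

```python
from transformers import MBartConfig

config = MBartConfig()  # defaults shown in the file above

# attribute_map routes the generic names to the MBart-specific attributes
assert config.hidden_size == config.d_model == 1024
assert config.num_attention_heads == config.encoder_attention_heads == 16

# __init__ copies encoder_layers into num_hidden_layers
assert config.num_hidden_layers == config.encoder_layers == 12
```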
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,83 @@

# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
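The converter above can also be called programmatically instead of through the CLI. A hedged sketch, assuming a fairseq mBART checkpoint saved with a top-level "model" key (as the function expects); the paths are placeholders, not values from the commit:

```python
# Sketch only: paths are placeholders; requires torch and a fairseq-format mBART checkpoint.
from transformers.models.mbart.convert_mbart_original_checkpoint_to_pytorch import (
    convert_fairseq_mbart_checkpoint_from_disk,
)

model = convert_fairseq_mbart_checkpoint_from_disk(
    "path/to/model.pt",                      # placeholder checkpoint path
    hf_config_path="facebook/mbart-large-cc25",
    finetuned=True,                          # rebuilds lm_head from the shared embedding weights
    mbart_50=False,
)
model.save_pretrained("path/to/output_dir")  # placeholder output folder
```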
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/modeling_flax_mbart.py ADDED
@@ -0,0 +1,1771 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Flax MBart model."""
16
+
17
+ import math
18
+ import random
19
+ from functools import partial
20
+ from typing import Callable, Optional, Tuple
21
+
22
+ import flax.linen as nn
23
+ import jax
24
+ import jax.numpy as jnp
25
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
26
+ from flax.linen import combine_masks, make_causal_mask
27
+ from flax.linen.attention import dot_product_attention_weights
28
+ from flax.traverse_util import flatten_dict, unflatten_dict
29
+ from jax import lax
30
+ from jax.random import PRNGKey
31
+
32
+ from ...modeling_flax_outputs import (
33
+ FlaxBaseModelOutput,
34
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
35
+ FlaxCausalLMOutputWithCrossAttentions,
36
+ FlaxSeq2SeqLMOutput,
37
+ FlaxSeq2SeqModelOutput,
38
+ FlaxSeq2SeqQuestionAnsweringModelOutput,
39
+ FlaxSeq2SeqSequenceClassifierOutput,
40
+ )
41
+ from ...modeling_flax_utils import (
42
+ ACT2FN,
43
+ FlaxPreTrainedModel,
44
+ append_call_sample_docstring,
45
+ append_replace_return_docstrings,
46
+ overwrite_call_docstring,
47
+ )
48
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
49
+ from .configuration_mbart import MBartConfig
50
+
51
+
52
+ logger = logging.get_logger(__name__)
53
+
54
+ _CHECKPOINT_FOR_DOC = "facebook/mbart-large-cc25"
55
+ _CONFIG_FOR_DOC = "MBartConfig"
56
+
57
+
58
+ MBART_START_DOCSTRING = r"""
59
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
60
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
61
+ etc.)
62
+
63
+ This model is also a Flax Linen
64
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
65
+ regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
66
+
67
+ Finally, this model supports inherent JAX features such as:
68
+
69
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
70
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
71
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
72
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
73
+
74
+ Parameters:
75
+ config ([`MBartConfig`]): Model configuration class with all the parameters of the model.
76
+ Initializing with a config file does not load the weights associated with the model, only the
77
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
78
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
79
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
80
+ `jax.numpy.bfloat16` (on TPUs).
81
+
82
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
83
+ specified all the computation will be performed with the given `dtype`.
84
+
85
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
86
+ parameters.**
87
+
88
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
89
+ [`~FlaxPreTrainedModel.to_bf16`].
90
+ """
91
+
92
+ MBART_INPUTS_DOCSTRING = r"""
93
+ Args:
94
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
95
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
96
+ it.
97
+
98
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
99
+ [`PreTrainedTokenizer.__call__`] for details.
100
+
101
+ [What are input IDs?](../glossary#input-ids)
102
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
103
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
104
+
105
+ - 1 for tokens that are **not masked**,
106
+ - 0 for tokens that are **masked**.
107
+
108
+ [What are attention masks?](../glossary#attention-mask)
109
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
110
+ Indices of decoder input sequence tokens in the vocabulary.
111
+
112
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
113
+ [`PreTrainedTokenizer.__call__`] for details.
114
+
115
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
116
+
117
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
118
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
119
+ for denoising pre-training following the paper.
120
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
121
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
122
+ be used by default.
123
+
124
+ If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the
125
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
126
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
127
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
128
+ config.max_position_embeddings - 1]`.
129
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
130
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
131
+ range `[0, config.max_position_embeddings - 1]`.
132
+ output_attentions (`bool`, *optional*):
133
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
134
+ tensors for more detail.
135
+ output_hidden_states (`bool`, *optional*):
136
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
137
+ more detail.
138
+ return_dict (`bool`, *optional*):
139
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
140
+ """
141
+
142
+
143
+ MBART_ENCODE_INPUTS_DOCSTRING = r"""
144
+ Args:
145
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
146
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
147
+ it.
148
+
149
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
150
+ [`PreTrainedTokenizer.__call__`] for details.
151
+
152
+ [What are input IDs?](../glossary#input-ids)
153
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
154
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
155
+
156
+ - 1 for tokens that are **not masked**,
157
+ - 0 for tokens that are **masked**.
158
+
159
+ [What are attention masks?](../glossary#attention-mask)
160
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
161
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
162
+ config.max_position_embeddings - 1]`.
163
+ output_attentions (`bool`, *optional*):
164
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
165
+ tensors for more detail.
166
+ output_hidden_states (`bool`, *optional*):
167
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
168
+ more detail.
169
+ return_dict (`bool`, *optional*):
170
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
171
+ """
172
+
173
+ MBART_DECODE_INPUTS_DOCSTRING = r"""
174
+ Args:
175
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):
176
+ Indices of decoder input sequence tokens in the vocabulary.
177
+
178
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
179
+ [`PreTrainedTokenizer.__call__`] for details.
180
+
181
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
182
+
183
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
184
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
185
+ for denoising pre-training following the paper.
186
+ encoder_outputs (`tuple(tuple(jnp.ndarray)`):
187
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
188
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
189
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
190
+ encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
191
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
192
+
193
+ - 1 for tokens that are **not masked**,
194
+ - 0 for tokens that are **masked**.
195
+
196
+ [What are attention masks?](../glossary#attention-mask)
197
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
198
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
199
+ be used by default.
200
+
201
+ If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the
202
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
203
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
204
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
205
+ range `[0, config.max_position_embeddings - 1]`.
206
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
207
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
208
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
209
+ output_attentions (`bool`, *optional*):
210
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
211
+ tensors for more detail.
212
+ output_hidden_states (`bool`, *optional*):
213
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
214
+ more detail.
215
+ return_dict (`bool`, *optional*):
216
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
217
+ """
218
+
219
+
220
+ def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int) -> jnp.ndarray:
221
+ """
222
+ Shift input ids one token to the right, and wrap the last non pad token (the <LID> token) Note that MBart does not
223
+ have a single `decoder_start_token_id` in contrast to other Bart-like models.
224
+ """
225
+ prev_output_tokens = jnp.array(input_ids).copy()
226
+
227
+ if pad_token_id is None:
228
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
229
+
230
+ # replace possible -100 values in labels by `pad_token_id`
231
+ prev_output_tokens = jnp.where(prev_output_tokens == -100, pad_token_id, input_ids)
232
+ index_of_eos = (jnp.where(prev_output_tokens != pad_token_id, 1, 0).sum(axis=-1) - 1).reshape(-1, 1)
233
+ decoder_start_tokens = jnp.array(
234
+ [prev_output_tokens[i, eos_idx] for i, eos_idx in enumerate(index_of_eos)], dtype=jnp.int32
235
+ ).squeeze()
236
+
237
+ prev_output_tokens = prev_output_tokens.at[:, 1:].set(prev_output_tokens[:, :-1])
238
+ prev_output_tokens = prev_output_tokens.at[:, 0].set(decoder_start_tokens)
239
+
240
+ return prev_output_tokens
241
+
242
+
243
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention with Bart->MBart
244
+ class FlaxMBartAttention(nn.Module):
245
+ config: MBartConfig
246
+ embed_dim: int
247
+ num_heads: int
248
+ dropout: float = 0.0
249
+ causal: bool = False
250
+ bias: bool = True
251
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
252
+
253
+ def setup(self) -> None:
254
+ self.head_dim = self.embed_dim // self.num_heads
255
+ if self.head_dim * self.num_heads != self.embed_dim:
256
+ raise ValueError(
257
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
258
+ f" and `num_heads`: {self.num_heads})."
259
+ )
260
+
261
+ dense = partial(
262
+ nn.Dense,
263
+ self.embed_dim,
264
+ use_bias=self.bias,
265
+ dtype=self.dtype,
266
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
267
+ )
268
+
269
+ self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
270
+ self.out_proj = dense()
271
+
272
+ self.dropout_layer = nn.Dropout(rate=self.dropout)
273
+
274
+ if self.causal:
275
+ self.causal_mask = make_causal_mask(
276
+ jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
277
+ )
278
+
279
+ def _split_heads(self, hidden_states):
280
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
281
+
282
+ def _merge_heads(self, hidden_states):
283
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
284
+
285
+ @nn.compact
286
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
287
+ """
288
+ This function takes projected key, value states from a single input token and concatenates the states to cached
289
+ states from previous steps. This function is slighly adapted from the official Flax repository:
290
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
291
+ """
292
+ # detect if we're initializing by absence of existing cache data.
293
+ is_initialized = self.has_variable("cache", "cached_key")
294
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
295
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
296
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
297
+
298
+ if is_initialized:
299
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
300
+ # update key, value caches with our new 1d spatial slices
301
+ cur_index = cache_index.value
302
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
303
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
304
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
305
+ cached_key.value = key
306
+ cached_value.value = value
307
+ num_updated_cache_vectors = query.shape[1]
308
+ cache_index.value = cache_index.value + num_updated_cache_vectors
309
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
310
+ pad_mask = jnp.broadcast_to(
311
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
312
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
313
+ )
314
+ attention_mask = combine_masks(pad_mask, attention_mask)
315
+ return key, value, attention_mask
316
+
317
+ def __call__(
318
+ self,
319
+ hidden_states: jnp.ndarray,
320
+ key_value_states: Optional[jnp.ndarray] = None,
321
+ attention_mask: Optional[jnp.ndarray] = None,
322
+ init_cache: bool = False,
323
+ deterministic: bool = True,
324
+ ) -> Tuple[jnp.ndarray]:
325
+ """Input shape: Batch x Time x Channel"""
326
+
327
+ # if key_value_states are provided this layer is used as a cross-attention layer
328
+ # for the decoder
329
+ is_cross_attention = key_value_states is not None
330
+ batch_size = hidden_states.shape[0]
331
+
332
+ # get query proj
333
+ query_states = self.q_proj(hidden_states)
334
+ # get key, value proj
335
+ if is_cross_attention:
336
+ # cross_attentions
337
+ key_states = self.k_proj(key_value_states)
338
+ value_states = self.v_proj(key_value_states)
339
+ else:
340
+ # self_attention
341
+ key_states = self.k_proj(hidden_states)
342
+ value_states = self.v_proj(hidden_states)
343
+
344
+ query_states = self._split_heads(query_states)
345
+ key_states = self._split_heads(key_states)
346
+ value_states = self._split_heads(value_states)
347
+
348
+ # handle cache prepare causal attention mask
349
+ if self.causal:
350
+ query_length, key_length = query_states.shape[1], key_states.shape[1]
351
+ if self.has_variable("cache", "cached_key"):
352
+ mask_shift = self.variables["cache"]["cache_index"]
353
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
354
+ causal_mask = lax.dynamic_slice(
355
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
356
+ )
357
+ else:
358
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
359
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
360
+
361
+ # combine masks if needed
362
+ if attention_mask is not None and self.causal:
363
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
364
+ attention_mask = combine_masks(attention_mask, causal_mask)
365
+ elif self.causal:
366
+ attention_mask = causal_mask
367
+ elif attention_mask is not None:
368
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
369
+
370
+ # During fast autoregressive decoding, we feed one position at a time,
371
+ # and cache the keys and values step by step.
372
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
373
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
374
+ key_states, value_states, query_states, attention_mask
375
+ )
376
+
377
+ # Convert the boolean attention mask to an attention bias.
378
+ if attention_mask is not None:
379
+ # attention mask in the form of attention bias
380
+ attention_bias = lax.select(
381
+ attention_mask > 0,
382
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
383
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
384
+ )
385
+ else:
386
+ attention_bias = None
387
+
388
+ dropout_rng = None
389
+ if not deterministic and self.dropout > 0.0:
390
+ dropout_rng = self.make_rng("dropout")
391
+
392
+ attn_weights = dot_product_attention_weights(
393
+ query_states,
394
+ key_states,
395
+ bias=attention_bias,
396
+ dropout_rng=dropout_rng,
397
+ dropout_rate=self.dropout,
398
+ broadcast_dropout=True,
399
+ deterministic=deterministic,
400
+ dtype=self.dtype,
401
+ precision=None,
402
+ )
403
+
404
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
405
+ attn_output = self._merge_heads(attn_output)
406
+ attn_output = self.out_proj(attn_output)
407
+
408
+ return attn_output, attn_weights
409
+
410
+
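For reference, a minimal standalone sketch of the boolean-mask-to-additive-bias conversion used in the attention module above; the toy mask values are an assumption for illustration, not taken from this file:

```python
import jax.numpy as jnp
from jax import lax

# 1 = attend, 0 = masked; a made-up single-row mask for illustration
attention_mask = jnp.array([[1, 1, 0]])
dtype = jnp.float32

# masked positions receive the most negative representable value so that they
# vanish after the softmax inside dot_product_attention_weights
attention_bias = lax.select(
    attention_mask > 0,
    jnp.full(attention_mask.shape, 0.0).astype(dtype),
    jnp.full(attention_mask.shape, jnp.finfo(dtype).min).astype(dtype),
)
print(attention_bias)  # approximately [[0., 0., -3.4e38]]
```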
411
+ class FlaxMBartEncoderLayer(nn.Module):
412
+ config: MBartConfig
413
+ dtype: jnp.dtype = jnp.float32
414
+
415
+ def setup(self) -> None:
416
+ self.embed_dim = self.config.d_model
417
+ self.self_attn = FlaxMBartAttention(
418
+ config=self.config,
419
+ embed_dim=self.embed_dim,
420
+ num_heads=self.config.encoder_attention_heads,
421
+ dropout=self.config.attention_dropout,
422
+ dtype=self.dtype,
423
+ )
424
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
425
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
426
+ self.activation_fn = ACT2FN[self.config.activation_function]
427
+ self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
428
+ self.fc1 = nn.Dense(
429
+ self.config.encoder_ffn_dim,
430
+ dtype=self.dtype,
431
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
432
+ )
433
+ self.fc2 = nn.Dense(
434
+ self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
435
+ )
436
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
437
+
438
+ def __call__(
439
+ self,
440
+ hidden_states: jnp.ndarray,
441
+ attention_mask: jnp.ndarray,
442
+ output_attentions: bool = True,
443
+ deterministic: bool = True,
444
+ ) -> Tuple[jnp.ndarray]:
445
+ residual = hidden_states
446
+ hidden_states = self.self_attn_layer_norm(hidden_states)
447
+ hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask)
448
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
449
+ hidden_states = residual + hidden_states
450
+
451
+ residual = hidden_states
452
+ hidden_states = self.final_layer_norm(hidden_states)
453
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
454
+ hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
455
+ hidden_states = self.fc2(hidden_states)
456
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
457
+ hidden_states = residual + hidden_states
458
+
459
+ outputs = (hidden_states,)
460
+
461
+ if output_attentions:
462
+ outputs += (attn_weights,)
463
+
464
+ return outputs
465
+
466
+
467
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayerCollection with Bart->MBart
468
+ class FlaxMBartEncoderLayerCollection(nn.Module):
469
+ config: MBartConfig
470
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
471
+
472
+ def setup(self):
473
+ self.layers = [
474
+ FlaxMBartEncoderLayer(self.config, name=str(i), dtype=self.dtype)
475
+ for i in range(self.config.encoder_layers)
476
+ ]
477
+ self.layerdrop = self.config.encoder_layerdrop
478
+
479
+ def __call__(
480
+ self,
481
+ hidden_states,
482
+ attention_mask,
483
+ deterministic: bool = True,
484
+ output_attentions: bool = False,
485
+ output_hidden_states: bool = False,
486
+ return_dict: bool = True,
487
+ ):
488
+ all_attentions = () if output_attentions else None
489
+ all_hidden_states = () if output_hidden_states else None
490
+
491
+ for encoder_layer in self.layers:
492
+ if output_hidden_states:
493
+ all_hidden_states = all_hidden_states + (hidden_states,)
494
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
495
+ dropout_probability = random.uniform(0, 1)
496
+ if not deterministic and (dropout_probability < self.layerdrop): # skip the layer
497
+ layer_outputs = (None, None)
498
+ else:
499
+ layer_outputs = encoder_layer(
500
+ hidden_states,
501
+ attention_mask,
502
+ output_attentions,
503
+ deterministic,
504
+ )
505
+ hidden_states = layer_outputs[0]
506
+ if output_attentions:
507
+ all_attentions = all_attentions + (layer_outputs[1],)
508
+
509
+ if output_hidden_states:
510
+ all_hidden_states += (hidden_states,)
511
+
512
+ outputs = (hidden_states, all_hidden_states, all_attentions)
513
+
514
+ if not return_dict:
515
+ return tuple(v for v in outputs if v is not None)
516
+
517
+ return FlaxBaseModelOutput(
518
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
519
+ )
520
+
521
+
522
+ class FlaxMBartDecoderLayer(nn.Module):
523
+ config: MBartConfig
524
+ dtype: jnp.dtype = jnp.float32
525
+
526
+ def setup(self) -> None:
527
+ self.embed_dim = self.config.d_model
528
+ self.self_attn = FlaxMBartAttention(
529
+ config=self.config,
530
+ embed_dim=self.embed_dim,
531
+ num_heads=self.config.decoder_attention_heads,
532
+ dropout=self.config.attention_dropout,
533
+ causal=True,
534
+ dtype=self.dtype,
535
+ )
536
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
537
+ self.activation_fn = ACT2FN[self.config.activation_function]
538
+ self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
539
+
540
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
541
+ self.encoder_attn = FlaxMBartAttention(
542
+ config=self.config,
543
+ embed_dim=self.embed_dim,
544
+ num_heads=self.config.decoder_attention_heads,
545
+ dropout=self.config.attention_dropout,
546
+ dtype=self.dtype,
547
+ )
548
+ self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
549
+ self.fc1 = nn.Dense(
550
+ self.config.decoder_ffn_dim,
551
+ dtype=self.dtype,
552
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
553
+ )
554
+ self.fc2 = nn.Dense(
555
+ self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
556
+ )
557
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
558
+
559
+ def __call__(
560
+ self,
561
+ hidden_states: jnp.ndarray,
562
+ attention_mask: jnp.ndarray,
563
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
564
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
565
+ init_cache: bool = False,
566
+ output_attentions: bool = True,
567
+ deterministic: bool = True,
568
+ ) -> Tuple[jnp.ndarray]:
569
+ residual = hidden_states
570
+ hidden_states = self.self_attn_layer_norm(hidden_states)
571
+
572
+ # Self Attention
573
+ hidden_states, self_attn_weights = self.self_attn(
574
+ hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache
575
+ )
576
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
577
+ hidden_states = residual + hidden_states
578
+
579
+ # Cross-Attention Block
580
+ cross_attn_weights = None
581
+ if encoder_hidden_states is not None:
582
+ residual = hidden_states
583
+
584
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
585
+ hidden_states, cross_attn_weights = self.encoder_attn(
586
+ hidden_states=hidden_states,
587
+ key_value_states=encoder_hidden_states,
588
+ attention_mask=encoder_attention_mask,
589
+ )
590
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
591
+ hidden_states = residual + hidden_states
592
+
593
+ # Fully Connected
594
+ residual = hidden_states
595
+ hidden_states = self.final_layer_norm(hidden_states)
596
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
597
+ hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
598
+ hidden_states = self.fc2(hidden_states)
599
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
600
+ hidden_states = residual + hidden_states
601
+
602
+ outputs = (hidden_states,)
603
+
604
+ if output_attentions:
605
+ outputs += (self_attn_weights, cross_attn_weights)
606
+
607
+ return outputs
608
+
609
+
610
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayerCollection with Bart->MBart
611
+ class FlaxMBartDecoderLayerCollection(nn.Module):
612
+ config: MBartConfig
613
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
614
+
615
+ def setup(self):
616
+ self.layers = [
617
+ FlaxMBartDecoderLayer(self.config, name=str(i), dtype=self.dtype)
618
+ for i in range(self.config.decoder_layers)
619
+ ]
620
+ self.layerdrop = self.config.decoder_layerdrop
621
+
622
+ def __call__(
623
+ self,
624
+ hidden_states,
625
+ attention_mask,
626
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
627
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
628
+ deterministic: bool = True,
629
+ init_cache: bool = False,
630
+ output_attentions: bool = False,
631
+ output_hidden_states: bool = False,
632
+ return_dict: bool = True,
633
+ ):
634
+ # decoder layers
635
+ all_hidden_states = () if output_hidden_states else None
636
+ all_self_attns = () if output_attentions else None
637
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
638
+
639
+ for decoder_layer in self.layers:
640
+ if output_hidden_states:
641
+ all_hidden_states += (hidden_states,)
642
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
643
+ dropout_probability = random.uniform(0, 1)
644
+ if not deterministic and (dropout_probability < self.layerdrop):
645
+ layer_outputs = (None, None, None)
646
+ else:
647
+ layer_outputs = decoder_layer(
648
+ hidden_states,
649
+ attention_mask=attention_mask,
650
+ encoder_hidden_states=encoder_hidden_states,
651
+ encoder_attention_mask=encoder_attention_mask,
652
+ init_cache=init_cache,
653
+ output_attentions=output_attentions,
654
+ deterministic=deterministic,
655
+ )
656
+
657
+ hidden_states = layer_outputs[0]
658
+ if output_attentions:
659
+ all_self_attns += (layer_outputs[1],)
660
+
661
+ if encoder_hidden_states is not None:
662
+ all_cross_attentions += (layer_outputs[2],)
663
+
664
+ # add hidden states from the last decoder layer
665
+ if output_hidden_states:
666
+ all_hidden_states += (hidden_states,)
667
+
668
+ outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions]
669
+
670
+ if not return_dict:
671
+ return tuple(v for v in outputs if v is not None)
672
+
673
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
674
+ last_hidden_state=hidden_states,
675
+ hidden_states=all_hidden_states,
676
+ attentions=all_self_attns,
677
+ cross_attentions=all_cross_attentions,
678
+ )
679
+
680
+
681
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartClassificationHead with Bart->MBart
682
+ class FlaxMBartClassificationHead(nn.Module):
683
+ """Head for sentence-level classification tasks."""
684
+
685
+ config: MBartConfig
686
+ inner_dim: int
687
+ num_classes: int
688
+ pooler_dropout: float
689
+ dtype: jnp.dtype = jnp.float32
690
+
691
+ def setup(self):
692
+ self.dense = nn.Dense(
693
+ self.inner_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
694
+ )
695
+ self.dropout = nn.Dropout(rate=self.pooler_dropout)
696
+ self.out_proj = nn.Dense(
697
+ self.num_classes,
698
+ dtype=self.dtype,
699
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
700
+ )
701
+
702
+ def __call__(self, hidden_states: jnp.ndarray, deterministic: bool):
703
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
704
+ hidden_states = self.dense(hidden_states)
705
+ hidden_states = jnp.tanh(hidden_states)
706
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
707
+ hidden_states = self.out_proj(hidden_states)
708
+ return hidden_states
709
+
710
+
711
+ class FlaxMBartEncoder(nn.Module):
712
+ config: MBartConfig
713
+ embed_tokens: nn.Embed
714
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
715
+
716
+ def setup(self):
717
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
718
+
719
+ embed_dim = self.config.d_model
720
+ self.padding_idx = self.config.pad_token_id
721
+ self.max_source_positions = self.config.max_position_embeddings
722
+ self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
723
+
724
+ # MBart is set up so that if padding_idx is specified then offset the embedding ids by 2
725
+ # and adjust num_embeddings appropriately. Other models don't have this hack
726
+ self.offset = 2
727
+ self.embed_positions = nn.Embed(
728
+ self.config.max_position_embeddings + self.offset,
729
+ embed_dim,
730
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
731
+ )
732
+ self.layers = FlaxMBartEncoderLayerCollection(self.config, self.dtype)
733
+ self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
734
+ self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
735
+
736
+ def __call__(
737
+ self,
738
+ input_ids,
739
+ attention_mask,
740
+ position_ids,
741
+ output_attentions: bool = False,
742
+ output_hidden_states: bool = False,
743
+ return_dict: bool = True,
744
+ deterministic: bool = True,
745
+ ):
746
+ input_shape = input_ids.shape
747
+ input_ids = input_ids.reshape(-1, input_shape[-1])
748
+
749
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
750
+
751
+ embed_pos = self.embed_positions(position_ids + self.offset)
752
+
753
+ hidden_states = inputs_embeds + embed_pos
754
+ hidden_states = self.layernorm_embedding(hidden_states)
755
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
756
+
757
+ outputs = self.layers(
758
+ hidden_states,
759
+ attention_mask,
760
+ deterministic=deterministic,
761
+ output_attentions=output_attentions,
762
+ output_hidden_states=output_hidden_states,
763
+ return_dict=return_dict,
764
+ )
765
+
766
+ last_hidden_states = outputs[0]
767
+ last_hidden_states = self.layer_norm(last_hidden_states)
768
+
769
+ # update the last element in `hidden_states` after applying `layernorm` above
770
+ hidden_states = None
771
+ if output_hidden_states:
772
+ hidden_states = outputs[1]
773
+ hidden_states = hidden_states[:-1] + (last_hidden_states,)
774
+
775
+ if not return_dict:
776
+ outputs = (last_hidden_states, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:])
777
+ return tuple(v for v in outputs if v is not None)
778
+
779
+ return FlaxBaseModelOutput(
780
+ last_hidden_state=last_hidden_states,
781
+ hidden_states=hidden_states,
782
+ attentions=outputs.attentions,
783
+ )
784
+
785
+
786
+ class FlaxMBartDecoder(nn.Module):
787
+ config: MBartConfig
788
+ embed_tokens: nn.Embed
789
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
790
+
791
+ def setup(self):
792
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
793
+
794
+ embed_dim = self.config.d_model
795
+ self.padding_idx = self.config.pad_token_id
796
+ self.max_target_positions = self.config.max_position_embeddings
797
+ self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
798
+
799
+ # MBart is set up so that if padding_idx is specified then offset the embedding ids by 2
800
+ # and adjust num_embeddings appropriately. Other models don't have this hack
801
+ self.offset = 2
802
+ self.embed_positions = nn.Embed(
803
+ self.config.max_position_embeddings + self.offset,
804
+ embed_dim,
805
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
806
+ )
807
+
808
+ self.layers = FlaxMBartDecoderLayerCollection(self.config, self.dtype)
809
+ self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
810
+ self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
811
+
812
+ def __call__(
813
+ self,
814
+ input_ids,
815
+ attention_mask,
816
+ position_ids,
817
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
818
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
819
+ init_cache: bool = False,
820
+ output_attentions: bool = False,
821
+ output_hidden_states: bool = False,
822
+ return_dict: bool = True,
823
+ deterministic: bool = True,
824
+ ):
825
+ input_shape = input_ids.shape
826
+ input_ids = input_ids.reshape(-1, input_shape[-1])
827
+
828
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
829
+
830
+ # embed positions
831
+ positions = self.embed_positions(position_ids + self.offset)
832
+
833
+ hidden_states = inputs_embeds + positions
834
+ hidden_states = self.layernorm_embedding(hidden_states)
835
+
836
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
837
+
838
+ outputs = self.layers(
839
+ hidden_states,
840
+ attention_mask,
841
+ encoder_hidden_states,
842
+ encoder_attention_mask,
843
+ deterministic=deterministic,
844
+ init_cache=init_cache,
845
+ output_attentions=output_attentions,
846
+ output_hidden_states=output_hidden_states,
847
+ return_dict=return_dict,
848
+ )
849
+
850
+ last_hidden_states = outputs[0]
851
+ last_hidden_states = self.layer_norm(last_hidden_states)
852
+
853
+ # update the last element in `hidden_states` after applying `layernorm` above
854
+ hidden_states = None
855
+ if output_hidden_states:
856
+ hidden_states = outputs[1]
857
+ hidden_states = hidden_states[:-1] + (last_hidden_states,)
858
+
859
+ if not return_dict:
860
+ outputs = (last_hidden_states, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:])
861
+ return tuple(v for v in outputs if v is not None)
862
+
863
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
864
+ last_hidden_state=last_hidden_states,
865
+ hidden_states=hidden_states,
866
+ attentions=outputs.attentions,
867
+ cross_attentions=outputs.cross_attentions,
868
+ )
869
+
870
+
871
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartModule with Bart->MBart
872
+ class FlaxMBartModule(nn.Module):
873
+ config: MBartConfig
874
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
875
+
876
+ def setup(self):
877
+ self.shared = nn.Embed(
878
+ self.config.vocab_size,
879
+ self.config.d_model,
880
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
881
+ dtype=self.dtype,
882
+ )
883
+
884
+ self.encoder = FlaxMBartEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
885
+ self.decoder = FlaxMBartDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
886
+
887
+ def _get_encoder_module(self):
888
+ return self.encoder
889
+
890
+ def _get_decoder_module(self):
891
+ return self.decoder
892
+
893
+ def __call__(
894
+ self,
895
+ input_ids,
896
+ attention_mask,
897
+ decoder_input_ids,
898
+ decoder_attention_mask,
899
+ position_ids,
900
+ decoder_position_ids,
901
+ output_attentions: bool = False,
902
+ output_hidden_states: bool = False,
903
+ return_dict: bool = True,
904
+ deterministic: bool = True,
905
+ ):
906
+ encoder_outputs = self.encoder(
907
+ input_ids=input_ids,
908
+ attention_mask=attention_mask,
909
+ position_ids=position_ids,
910
+ output_attentions=output_attentions,
911
+ output_hidden_states=output_hidden_states,
912
+ return_dict=return_dict,
913
+ deterministic=deterministic,
914
+ )
915
+
916
+ decoder_outputs = self.decoder(
917
+ input_ids=decoder_input_ids,
918
+ attention_mask=decoder_attention_mask,
919
+ position_ids=decoder_position_ids,
920
+ encoder_hidden_states=encoder_outputs[0],
921
+ encoder_attention_mask=attention_mask,
922
+ output_attentions=output_attentions,
923
+ output_hidden_states=output_hidden_states,
924
+ return_dict=return_dict,
925
+ deterministic=deterministic,
926
+ )
927
+
928
+ if not return_dict:
929
+ return decoder_outputs + encoder_outputs
930
+
931
+ return FlaxSeq2SeqModelOutput(
932
+ last_hidden_state=decoder_outputs.last_hidden_state,
933
+ decoder_hidden_states=decoder_outputs.hidden_states,
934
+ decoder_attentions=decoder_outputs.attentions,
935
+ cross_attentions=decoder_outputs.cross_attentions,
936
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
937
+ encoder_hidden_states=encoder_outputs.hidden_states,
938
+ encoder_attentions=encoder_outputs.attentions,
939
+ )
940
+
941
+
942
+ class FlaxMBartPreTrainedModel(FlaxPreTrainedModel):
943
+ config_class = MBartConfig
944
+ base_model_prefix: str = "model"
945
+ module_class: nn.Module = None
946
+
947
+ def __init__(
948
+ self,
949
+ config: MBartConfig,
950
+ input_shape: Tuple[int] = (1, 1),
951
+ seed: int = 0,
952
+ dtype: jnp.dtype = jnp.float32,
953
+ _do_init: bool = True,
954
+ **kwargs,
955
+ ):
956
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
957
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
958
+
959
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
960
+ # init input tensors
961
+ input_ids = jnp.zeros(input_shape, dtype="i4")
962
+ # make sure initialization pass will work for FlaxMBartForSequenceClassificationModule
963
+ input_ids = input_ids.at[(..., -1)].set(self.config.eos_token_id)
964
+ attention_mask = jnp.ones_like(input_ids)
965
+ decoder_input_ids = input_ids
966
+ decoder_attention_mask = jnp.ones_like(input_ids)
967
+
968
+ batch_size, sequence_length = input_ids.shape
969
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
970
+ decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
971
+
972
+ params_rng, dropout_rng = jax.random.split(rng)
973
+ rngs = {"params": params_rng, "dropout": dropout_rng}
974
+
975
+ random_params = self.module.init(
976
+ rngs,
977
+ input_ids,
978
+ attention_mask,
979
+ decoder_input_ids,
980
+ decoder_attention_mask,
981
+ position_ids,
982
+ decoder_position_ids,
983
+ )["params"]
984
+
985
+ if params is not None:
986
+ random_params = flatten_dict(unfreeze(random_params))
987
+ params = flatten_dict(unfreeze(params))
988
+ for missing_key in self._missing_keys:
989
+ params[missing_key] = random_params[missing_key]
990
+ self._missing_keys = set()
991
+ return freeze(unflatten_dict(params))
992
+ else:
993
+ return random_params
994
+
995
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartPreTrainedModel.init_cache with Bart->MBart
996
+ def init_cache(self, batch_size, max_length, encoder_outputs):
997
+ r"""
998
+ Args:
999
+ batch_size (`int`):
1000
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
1001
+ max_length (`int`):
1002
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
1003
+ cache.
1004
+ encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`):
1005
+ `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
1006
+ `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*)
1007
+ is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
1008
+ cross-attention of the decoder.
1009
+ """
1010
+ # init input variables to retrieve cache
1011
+ decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
1012
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
1013
+ decoder_position_ids = jnp.broadcast_to(
1014
+ jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
1015
+ )
1016
+
1017
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
1018
+ decoder_module = module._get_decoder_module()
1019
+ return decoder_module(
1020
+ decoder_input_ids,
1021
+ decoder_attention_mask,
1022
+ decoder_position_ids,
1023
+ **kwargs,
1024
+ )
1025
+
1026
+ init_variables = self.module.init(
1027
+ jax.random.PRNGKey(0),
1028
+ decoder_input_ids=decoder_input_ids,
1029
+ decoder_attention_mask=decoder_attention_mask,
1030
+ decoder_position_ids=decoder_position_ids,
1031
+ encoder_hidden_states=encoder_outputs[0],
1032
+ init_cache=True,
1033
+ method=_decoder_forward, # we only need to call the decoder to init the cache
1034
+ )
1035
+ return unfreeze(init_variables["cache"])
1036
+
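A hedged sketch of how `init_cache` is typically combined with `encode`/`decode` for step-by-step decoding; the checkpoint name follows the docstring examples in this file, while the decoding budget and greedy next-token step are arbitrary assumptions:

```python
import jax.numpy as jnp
from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration

model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

inputs = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="jax")
encoder_outputs = model.encode(**inputs)

batch_size, max_length = inputs["input_ids"].shape[0], 16  # assumed decoding budget
past_key_values = model.init_cache(batch_size, max_length, encoder_outputs)

decoder_input_ids = jnp.full((batch_size, 1), model.config.decoder_start_token_id, dtype="i4")
decoder_position_ids = jnp.zeros((batch_size, 1), dtype="i4")

# one decoding step; `past_key_values` in the output holds the updated cache
outputs = model.decode(
    decoder_input_ids,
    encoder_outputs,
    past_key_values=past_key_values,
    decoder_position_ids=decoder_position_ids,
)
next_token = jnp.argmax(outputs.logits[:, -1, :], axis=-1)
```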
1037
+ @add_start_docstrings(MBART_ENCODE_INPUTS_DOCSTRING)
1038
+ @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=MBartConfig)
1039
+ def encode(
1040
+ self,
1041
+ input_ids: jnp.ndarray,
1042
+ attention_mask: Optional[jnp.ndarray] = None,
1043
+ position_ids: Optional[jnp.ndarray] = None,
1044
+ output_attentions: Optional[bool] = None,
1045
+ output_hidden_states: Optional[bool] = None,
1046
+ return_dict: Optional[bool] = None,
1047
+ train: bool = False,
1048
+ params: dict = None,
1049
+ dropout_rng: PRNGKey = None,
1050
+ ):
1051
+ r"""
1052
+ Returns:
1053
+
1054
+ Example:
1055
+
1056
+ ```python
1057
+ >>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration
1058
+
1059
+ >>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
1060
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
1061
+
1062
+ >>> text = "My friends are cool but they eat too many carbs."
1063
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
1064
+ >>> encoder_outputs = model.encode(**inputs)
1065
+ ```"""
1066
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1067
+ output_hidden_states = (
1068
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1069
+ )
1070
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1071
+
1072
+ if attention_mask is None:
1073
+ attention_mask = jnp.ones_like(input_ids)
1074
+ if position_ids is None:
1075
+ batch_size, sequence_length = input_ids.shape
1076
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
1077
+
1078
+ # Handle any PRNG if needed
1079
+ rngs = {}
1080
+ if dropout_rng is not None:
1081
+ rngs["dropout"] = dropout_rng
1082
+
1083
+ def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
1084
+ encode_module = module._get_encoder_module()
1085
+ return encode_module(input_ids, attention_mask, position_ids, **kwargs)
1086
+
1087
+ return self.module.apply(
1088
+ {"params": params or self.params},
1089
+ input_ids=jnp.array(input_ids, dtype="i4"),
1090
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
1091
+ position_ids=jnp.array(position_ids, dtype="i4"),
1092
+ output_attentions=output_attentions,
1093
+ output_hidden_states=output_hidden_states,
1094
+ return_dict=return_dict,
1095
+ deterministic=not train,
1096
+ rngs=rngs,
1097
+ method=_encoder_forward,
1098
+ )
1099
+
1100
+ @add_start_docstrings(MBART_DECODE_INPUTS_DOCSTRING)
1101
+ @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=MBartConfig)
1102
+ def decode(
1103
+ self,
1104
+ decoder_input_ids,
1105
+ encoder_outputs,
1106
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1107
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
1108
+ decoder_position_ids: Optional[jnp.ndarray] = None,
1109
+ past_key_values: dict = None,
1110
+ output_attentions: Optional[bool] = None,
1111
+ output_hidden_states: Optional[bool] = None,
1112
+ return_dict: Optional[bool] = None,
1113
+ train: bool = False,
1114
+ params: dict = None,
1115
+ dropout_rng: PRNGKey = None,
1116
+ ):
1117
+ r"""
1118
+ Returns:
1119
+
1120
+ Example:
1121
+
1122
+ ```python
1123
+ >>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration
1124
+
1125
+ >>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
1126
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
1127
+
1128
+ >>> text = "My friends are cool but they eat too many carbs."
1129
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
1130
+ >>> encoder_outputs = model.encode(**inputs)
1131
+
1132
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
1133
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
1134
+
1135
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
1136
+ >>> last_decoder_hidden_states = outputs.last_hidden_state
1137
+ ```"""
1138
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1139
+ output_hidden_states = (
1140
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1141
+ )
1142
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1143
+
1144
+ encoder_hidden_states = encoder_outputs[0]
1145
+ if encoder_attention_mask is None:
1146
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
1147
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
1148
+
1149
+ batch_size, sequence_length = decoder_input_ids.shape
1150
+ if decoder_attention_mask is None:
1151
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
1152
+
1153
+ if decoder_position_ids is None:
1154
+ if past_key_values is not None:
1155
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
1156
+
1157
+ decoder_position_ids = jnp.broadcast_to(
1158
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
1159
+ )
1160
+
1161
+ # Handle any PRNG if needed
1162
+ rngs = {}
1163
+ if dropout_rng is not None:
1164
+ rngs["dropout"] = dropout_rng
1165
+
1166
+ inputs = {"params": params or self.params}
1167
+
1168
+ # if past_key_values are passed, then the cache is already initialized and a private flag init_cache has to be
1169
+ # passed down to ensure the cache is used. The cache also has to be marked as mutable so that
1170
+ # it can be changed by the FlaxMBartAttention module.
1171
+ if past_key_values:
1172
+ inputs["cache"] = past_key_values
1173
+ mutable = ["cache"]
1174
+ else:
1175
+ mutable = False
1176
+
1177
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
1178
+ decoder_module = module._get_decoder_module()
1179
+ return decoder_module(
1180
+ decoder_input_ids,
1181
+ decoder_attention_mask,
1182
+ decoder_position_ids,
1183
+ **kwargs,
1184
+ )
1185
+
1186
+ outputs = self.module.apply(
1187
+ inputs,
1188
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
1189
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
1190
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
1191
+ encoder_hidden_states=encoder_hidden_states,
1192
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
1193
+ output_attentions=output_attentions,
1194
+ output_hidden_states=output_hidden_states,
1195
+ return_dict=return_dict,
1196
+ deterministic=not train,
1197
+ rngs=rngs,
1198
+ mutable=mutable,
1199
+ method=_decoder_forward,
1200
+ )
1201
+
1202
+ # add updated cache to model output
1203
+ if past_key_values is not None and return_dict:
1204
+ outputs, past = outputs
1205
+ outputs["past_key_values"] = unfreeze(past["cache"])
1206
+ return outputs
1207
+ elif past_key_values is not None and not return_dict:
1208
+ outputs, past = outputs
1209
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
1210
+
1211
+ return outputs
1212
+
1213
+ @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
1214
+ def __call__(
1215
+ self,
1216
+ input_ids: jnp.ndarray,
1217
+ attention_mask: Optional[jnp.ndarray] = None,
1218
+ decoder_input_ids: Optional[jnp.ndarray] = None,
1219
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
1220
+ position_ids: Optional[jnp.ndarray] = None,
1221
+ decoder_position_ids: Optional[jnp.ndarray] = None,
1222
+ output_attentions: Optional[bool] = None,
1223
+ output_hidden_states: Optional[bool] = None,
1224
+ return_dict: Optional[bool] = None,
1225
+ train: bool = False,
1226
+ params: dict = None,
1227
+ dropout_rng: PRNGKey = None,
1228
+ ):
1229
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1230
+ output_hidden_states = (
1231
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1232
+ )
1233
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1234
+
1235
+ # prepare encoder inputs
1236
+ if attention_mask is None:
1237
+ attention_mask = jnp.ones_like(input_ids)
1238
+ if position_ids is None:
1239
+ batch_size, sequence_length = input_ids.shape
1240
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
1241
+
1242
+ # prepare decoder inputs
1243
+ if decoder_input_ids is None:
1244
+ decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id)
1245
+ if decoder_attention_mask is None:
1246
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
1247
+ if decoder_position_ids is None:
1248
+ batch_size, sequence_length = decoder_input_ids.shape
1249
+ decoder_position_ids = jnp.broadcast_to(
1250
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
1251
+ )
1252
+
1253
+ # Handle any PRNG if needed
1254
+ rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
1255
+
1256
+ return self.module.apply(
1257
+ {"params": params or self.params},
1258
+ input_ids=jnp.array(input_ids, dtype="i4"),
1259
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
1260
+ position_ids=jnp.array(position_ids, dtype="i4"),
1261
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
1262
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
1263
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
1264
+ output_attentions=output_attentions,
1265
+ output_hidden_states=output_hidden_states,
1266
+ return_dict=return_dict,
1267
+ deterministic=not train,
1268
+ rngs=rngs,
1269
+ )
1270
+
1271
+
1272
+ @add_start_docstrings(
1273
+ "The bare MBart Model transformer outputting raw hidden-states without any specific head on top.",
1274
+ MBART_START_DOCSTRING,
1275
+ )
1276
+ class FlaxMBartModel(FlaxMBartPreTrainedModel):
1277
+ config: MBartConfig
1278
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
1279
+ module_class = FlaxMBartModule
1280
+
1281
+
1282
+ append_call_sample_docstring(FlaxMBartModel, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC)
1283
+
1284
+
1285
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule with Bart->MBart
1286
+ class FlaxMBartForConditionalGenerationModule(nn.Module):
1287
+ config: MBartConfig
1288
+ dtype: jnp.dtype = jnp.float32
1289
+ bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
1290
+
1291
+ def setup(self):
1292
+ self.model = FlaxMBartModule(config=self.config, dtype=self.dtype)
1293
+ self.lm_head = nn.Dense(
1294
+ self.model.shared.num_embeddings,
1295
+ use_bias=False,
1296
+ dtype=self.dtype,
1297
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
1298
+ )
1299
+ self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings))
1300
+
1301
+ def _get_encoder_module(self):
1302
+ return self.model.encoder
1303
+
1304
+ def _get_decoder_module(self):
1305
+ return self.model.decoder
1306
+
1307
+ def __call__(
1308
+ self,
1309
+ input_ids,
1310
+ attention_mask,
1311
+ decoder_input_ids,
1312
+ decoder_attention_mask,
1313
+ position_ids,
1314
+ decoder_position_ids,
1315
+ output_attentions: bool = False,
1316
+ output_hidden_states: bool = False,
1317
+ return_dict: bool = True,
1318
+ deterministic: bool = True,
1319
+ ):
1320
+ outputs = self.model(
1321
+ input_ids=input_ids,
1322
+ attention_mask=attention_mask,
1323
+ decoder_input_ids=decoder_input_ids,
1324
+ decoder_attention_mask=decoder_attention_mask,
1325
+ position_ids=position_ids,
1326
+ decoder_position_ids=decoder_position_ids,
1327
+ output_attentions=output_attentions,
1328
+ output_hidden_states=output_hidden_states,
1329
+ return_dict=return_dict,
1330
+ deterministic=deterministic,
1331
+ )
1332
+
1333
+ hidden_states = outputs[0]
1334
+
1335
+ if self.config.tie_word_embeddings:
1336
+ shared_embedding = self.model.variables["params"]["shared"]["embedding"]
1337
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
1338
+ else:
1339
+ lm_logits = self.lm_head(hidden_states)
1340
+
1341
+ lm_logits += jax.lax.stop_gradient(self.final_logits_bias.astype(self.dtype))
1342
+
1343
+ if not return_dict:
1344
+ output = (lm_logits,) + outputs[1:]
1345
+ return output
1346
+
1347
+ return FlaxSeq2SeqLMOutput(
1348
+ logits=lm_logits,
1349
+ decoder_hidden_states=outputs.decoder_hidden_states,
1350
+ decoder_attentions=outputs.decoder_attentions,
1351
+ cross_attentions=outputs.cross_attentions,
1352
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1353
+ encoder_hidden_states=outputs.encoder_hidden_states,
1354
+ encoder_attentions=outputs.encoder_attentions,
1355
+ )
1356
+
1357
+
1358
+ @add_start_docstrings(
1359
+ "The MBart Model with a language modeling head. Can be used for summarization.", MBART_START_DOCSTRING
1360
+ )
1361
+ class FlaxMBartForConditionalGeneration(FlaxMBartPreTrainedModel):
1362
+ module_class = FlaxMBartForConditionalGenerationModule
1363
+ dtype: jnp.dtype = jnp.float32
1364
+
1365
+ @add_start_docstrings(MBART_DECODE_INPUTS_DOCSTRING)
1366
+ @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=MBartConfig)
1367
+ def decode(
1368
+ self,
1369
+ decoder_input_ids,
1370
+ encoder_outputs,
1371
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1372
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
1373
+ decoder_position_ids: Optional[jnp.ndarray] = None,
1374
+ past_key_values: dict = None,
1375
+ output_attentions: Optional[bool] = None,
1376
+ output_hidden_states: Optional[bool] = None,
1377
+ return_dict: Optional[bool] = None,
1378
+ train: bool = False,
1379
+ params: dict = None,
1380
+ dropout_rng: PRNGKey = None,
1381
+ ):
1382
+ r"""
1383
+ Returns:
1384
+
1385
+ Example:
1386
+
1387
+ ```python
1388
+ >>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration
1389
+
1390
+ >>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
1391
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
1392
+
1393
+ >>> text = "My friends are cool but they eat too many carbs."
1394
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
1395
+ >>> encoder_outputs = model.encode(**inputs)
1396
+
1397
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
1398
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
1399
+
1400
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
1401
+ >>> logits = outputs.logits
1402
+ ```"""
1403
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1404
+ output_hidden_states = (
1405
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1406
+ )
1407
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1408
+
1409
+ encoder_hidden_states = encoder_outputs[0]
1410
+ if encoder_attention_mask is None:
1411
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
1412
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
1413
+
1414
+ batch_size, sequence_length = decoder_input_ids.shape
1415
+ if decoder_attention_mask is None:
1416
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
1417
+
1418
+ if decoder_position_ids is None:
1419
+ if past_key_values is not None:
1420
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
1421
+
1422
+ decoder_position_ids = jnp.broadcast_to(
1423
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
1424
+ )
1425
+
1426
+ # Handle any PRNG if needed
1427
+ rngs = {}
1428
+ if dropout_rng is not None:
1429
+ rngs["dropout"] = dropout_rng
1430
+
1431
+ inputs = {"params": params or self.params}
1432
+
1433
+ # if past_key_values are passed, then the cache is already initialized and a private flag init_cache has to be
1434
+ # passed down to ensure the cache is used. The cache also has to be marked as mutable so that
1435
+ # it can be changed by the FlaxMBartAttention module.
1436
+ if past_key_values:
1437
+ inputs["cache"] = past_key_values
1438
+ mutable = ["cache"]
1439
+ else:
1440
+ mutable = False
1441
+
1442
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
1443
+ decoder_module = module._get_decoder_module()
1444
+ outputs = decoder_module(
1445
+ decoder_input_ids,
1446
+ decoder_attention_mask,
1447
+ decoder_position_ids,
1448
+ **kwargs,
1449
+ )
1450
+ hidden_states = outputs[0]
1451
+
1452
+ if self.config.tie_word_embeddings:
1453
+ shared_embedding = module.model.variables["params"]["shared"]["embedding"]
1454
+ lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
1455
+ else:
1456
+ lm_logits = module.lm_head(hidden_states)
1457
+
1458
+ lm_logits += module.final_logits_bias.astype(self.dtype)
1459
+ return lm_logits, outputs
1460
+
1461
+ outputs = self.module.apply(
1462
+ inputs,
1463
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
1464
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
1465
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
1466
+ encoder_hidden_states=encoder_hidden_states,
1467
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
1468
+ output_attentions=output_attentions,
1469
+ output_hidden_states=output_hidden_states,
1470
+ return_dict=return_dict,
1471
+ deterministic=not train,
1472
+ rngs=rngs,
1473
+ mutable=mutable,
1474
+ method=_decoder_forward,
1475
+ )
1476
+
1477
+ if past_key_values is None:
1478
+ lm_logits, decoder_outputs = outputs
1479
+ else:
1480
+ (lm_logits, decoder_outputs), past = outputs
1481
+
1482
+ if return_dict:
1483
+ outputs = FlaxCausalLMOutputWithCrossAttentions(
1484
+ logits=lm_logits,
1485
+ hidden_states=decoder_outputs.hidden_states,
1486
+ attentions=decoder_outputs.attentions,
1487
+ cross_attentions=decoder_outputs.cross_attentions,
1488
+ )
1489
+ else:
1490
+ outputs = (lm_logits,) + decoder_outputs[1:]
1491
+
1492
+ # add updated cache to model output
1493
+ if past_key_values is not None and return_dict:
1494
+ outputs["past_key_values"] = unfreeze(past["cache"])
1495
+ return outputs
1496
+ elif past_key_values is not None and not return_dict:
1497
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
1498
+
1499
+ return outputs
1500
+
1501
+ def prepare_inputs_for_generation(
1502
+ self,
1503
+ decoder_input_ids,
1504
+ max_length,
1505
+ attention_mask: Optional[jax.Array] = None,
1506
+ decoder_attention_mask: Optional[jax.Array] = None,
1507
+ encoder_outputs=None,
1508
+ **kwargs,
1509
+ ):
1510
+ # initializing the cache
1511
+ batch_size, seq_length = decoder_input_ids.shape
1512
+
1513
+ past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
1514
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
1515
+ # But since the decoder uses a causal mask, those positions are masked anyway.
1516
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation.
1517
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
1518
+ if decoder_attention_mask is not None:
1519
+ position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
1520
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
1521
+ else:
1522
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
1523
+
1524
+ return {
1525
+ "past_key_values": past_key_values,
1526
+ "encoder_outputs": encoder_outputs,
1527
+ "encoder_attention_mask": attention_mask,
1528
+ "decoder_attention_mask": extended_attention_mask,
1529
+ "decoder_position_ids": position_ids,
1530
+ }
1531
+
1532
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
1533
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
1534
+ model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
1535
+ return model_kwargs
1536
+
1537
+
1538
+ FLAX_MBART_CONDITIONAL_GENERATION_DOCSTRING = r"""
1539
+ Returns:
1540
+
1541
+ Summarization example:
1542
+
1543
+ ```python
1544
+ >>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration, MBartConfig
1545
+
1546
+ >>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
1547
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
1548
+
1549
+ >>> ARTICLE_TO_SUMMARIZE = "Meine Freunde sind cool, aber sie essen zu viel Kuchen."
1550
+ >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="np")
1551
+
1552
+ >>> # Generate Summary
1553
+ >>> summary_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=5).sequences
1554
+ >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))
1555
+ ```
1556
+
1557
+ Mask filling example:
1558
+
1559
+ ```python
1560
+ >>> import jax
+ >>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration
1561
+
1562
+ >>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
1563
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
1564
+
1565
+ >>> # de_DE is the language symbol id <LID> for German
1566
+ >>> TXT = "</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. </s> de_DE"
1567
+ >>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors="np")["input_ids"]
1568
+
1569
+ >>> logits = model(input_ids).logits
1570
+ >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero()[0].item()
1571
+ >>> probs = jax.nn.softmax(logits[0, masked_index], axis=-1)
1572
+ >>> values, predictions = jax.lax.top_k(probs, k=5)
1573
+
1574
+ >>> tokenizer.decode(predictions).split()
1575
+ ```
1576
+ """
1577
+
1578
+ overwrite_call_docstring(
1579
+ FlaxMBartForConditionalGeneration, MBART_INPUTS_DOCSTRING + FLAX_MBART_CONDITIONAL_GENERATION_DOCSTRING
1580
+ )
1581
+ append_replace_return_docstrings(
1582
+ FlaxMBartForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC
1583
+ )
1584
+
1585
+
1586
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForSequenceClassificationModule with Bart->MBart
1587
+ class FlaxMBartForSequenceClassificationModule(nn.Module):
1588
+ config: MBartConfig
1589
+ dtype: jnp.dtype = jnp.float32
1590
+ num_labels: Optional[int] = None
1591
+
1592
+ def setup(self):
1593
+ self.model = FlaxMBartModule(config=self.config, dtype=self.dtype)
1594
+ self.classification_head = FlaxMBartClassificationHead(
1595
+ config=self.config,
1596
+ inner_dim=self.config.d_model,
1597
+ num_classes=self.num_labels if self.num_labels is not None else self.config.num_labels,
1598
+ pooler_dropout=self.config.classifier_dropout,
1599
+ )
1600
+
1601
+ def _get_encoder_module(self):
1602
+ return self.model.encoder
1603
+
1604
+ def _get_decoder_module(self):
1605
+ return self.model.decoder
1606
+
1607
+ def __call__(
1608
+ self,
1609
+ input_ids,
1610
+ attention_mask,
1611
+ decoder_input_ids,
1612
+ decoder_attention_mask,
1613
+ position_ids,
1614
+ decoder_position_ids,
1615
+ output_attentions: bool = False,
1616
+ output_hidden_states: bool = False,
1617
+ return_dict: bool = True,
1618
+ deterministic: bool = True,
1619
+ ):
1620
+ outputs = self.model(
1621
+ input_ids=input_ids,
1622
+ attention_mask=attention_mask,
1623
+ decoder_input_ids=decoder_input_ids,
1624
+ decoder_attention_mask=decoder_attention_mask,
1625
+ position_ids=position_ids,
1626
+ decoder_position_ids=decoder_position_ids,
1627
+ output_attentions=output_attentions,
1628
+ output_hidden_states=output_hidden_states,
1629
+ return_dict=return_dict,
1630
+ deterministic=deterministic,
1631
+ )
1632
+
1633
+ hidden_states = outputs[0] # last hidden state
1634
+
1635
+ eos_mask = jnp.where(input_ids == self.config.eos_token_id, 1, 0)
1636
+
1637
+ # The first condition is necessary to overcome jax._src.errors.ConcretizationTypeError during JIT compilation
1638
+ if type(eos_mask) != jax.interpreters.partial_eval.DynamicJaxprTracer:
1639
+ if len(jnp.unique(eos_mask.sum(1))) > 1:
1640
+ raise ValueError("All examples must have the same number of <eos> tokens.")
1641
+
1642
+ if any(eos_mask.sum(1) == 0):
1643
+ raise ValueError("There are missing <eos> tokens in input_ids")
1644
+
1645
+ # Ensure to keep 1 only for the last <eos> token for each example
1646
+ eos_mask_noised = eos_mask + jnp.arange(eos_mask.shape[1]) * 1e-6
1647
+ eos_mask = jnp.where(eos_mask_noised == eos_mask_noised.max(1).reshape(-1, 1), 1, 0)
1648
+
1649
+ sentence_representation = jnp.einsum("ijk, ij -> ijk", hidden_states, eos_mask).sum(1)
1650
+ logits = self.classification_head(sentence_representation, deterministic=deterministic)
1651
+
1652
+ if not return_dict:
1653
+ output = (logits,) + outputs[1:]
1654
+ return output
1655
+
1656
+ return FlaxSeq2SeqSequenceClassifierOutput(
1657
+ logits=logits,
1658
+ decoder_hidden_states=outputs.decoder_hidden_states,
1659
+ decoder_attentions=outputs.decoder_attentions,
1660
+ cross_attentions=outputs.cross_attentions,
1661
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1662
+ encoder_hidden_states=outputs.encoder_hidden_states,
1663
+ encoder_attentions=outputs.encoder_attentions,
1664
+ )
1665
+
1666
+
1667
+ @add_start_docstrings(
1668
+ """
1669
+ MBart model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for GLUE
1670
+ tasks.
1671
+ """,
1672
+ MBART_START_DOCSTRING,
1673
+ )
1674
+ class FlaxMBartForSequenceClassification(FlaxMBartPreTrainedModel):
1675
+ module_class = FlaxMBartForSequenceClassificationModule
1676
+ dtype = jnp.float32
1677
+
1678
+
1679
+ append_call_sample_docstring(
1680
+ FlaxMBartForSequenceClassification,
1681
+ _CHECKPOINT_FOR_DOC,
1682
+ FlaxSeq2SeqSequenceClassifierOutput,
1683
+ _CONFIG_FOR_DOC,
1684
+ )
1685
+
1686
+
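A standalone toy sketch of the last-<eos>-token pooling trick used by the sequence classification module above; the token id, input ids, and hidden states are made-up values for illustration:

```python
import jax.numpy as jnp

eos_token_id = 2                              # made-up id for illustration
input_ids = jnp.array([[5, 7, 2, 9, 2]])      # two <eos> tokens in this example
hidden_states = jnp.arange(15, dtype=jnp.float32).reshape(1, 5, 3)

eos_mask = jnp.where(input_ids == eos_token_id, 1, 0)
# keep a 1 only at the *last* <eos> position of each example
eos_mask_noised = eos_mask + jnp.arange(eos_mask.shape[1]) * 1e-6
eos_mask = jnp.where(eos_mask_noised == eos_mask_noised.max(1).reshape(-1, 1), 1, 0)

# sentence representation = hidden state at the last <eos> token
sentence_representation = jnp.einsum("ijk, ij -> ijk", hidden_states, eos_mask).sum(1)
# matches hidden_states[0, 4] == [12., 13., 14.]
```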
1687
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForQuestionAnsweringModule with Bart->MBart
1688
+ class FlaxMBartForQuestionAnsweringModule(nn.Module):
1689
+ config: MBartConfig
1690
+ dtype: jnp.dtype = jnp.float32
1691
+ num_labels = 2
1692
+
1693
+ def setup(self):
1694
+ self.model = FlaxMBartModule(config=self.config, dtype=self.dtype)
1695
+ self.qa_outputs = nn.Dense(
1696
+ self.num_labels, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
1697
+ )
1698
+
1699
+ def _get_encoder_module(self):
1700
+ return self.model.encoder
1701
+
1702
+ def _get_decoder_module(self):
1703
+ return self.model.decoder
1704
+
1705
+ def __call__(
1706
+ self,
1707
+ input_ids,
1708
+ attention_mask,
1709
+ decoder_input_ids,
1710
+ decoder_attention_mask,
1711
+ position_ids,
1712
+ decoder_position_ids,
1713
+ output_attentions: bool = False,
1714
+ output_hidden_states: bool = False,
1715
+ return_dict: bool = True,
1716
+ deterministic: bool = True,
1717
+ ):
1718
+ outputs = self.model(
1719
+ input_ids=input_ids,
1720
+ attention_mask=attention_mask,
1721
+ decoder_input_ids=decoder_input_ids,
1722
+ decoder_attention_mask=decoder_attention_mask,
1723
+ position_ids=position_ids,
1724
+ decoder_position_ids=decoder_position_ids,
1725
+ output_attentions=output_attentions,
1726
+ output_hidden_states=output_hidden_states,
1727
+ return_dict=return_dict,
1728
+ deterministic=deterministic,
1729
+ )
1730
+
1731
+ sequence_output = outputs[0]
1732
+
1733
+ logits = self.qa_outputs(sequence_output)
1734
+ start_logits, end_logits = jnp.split(logits, logits.shape[-1], axis=-1)
1735
+ start_logits = start_logits.squeeze(-1)
1736
+ end_logits = end_logits.squeeze(-1)
1737
+
1738
+ if not return_dict:
1739
+ output = (start_logits, end_logits) + outputs[1:]
1740
+ return output
1741
+
1742
+ return FlaxSeq2SeqQuestionAnsweringModelOutput(
1743
+ start_logits=start_logits,
1744
+ end_logits=end_logits,
1745
+ decoder_hidden_states=outputs.decoder_hidden_states,
1746
+ decoder_attentions=outputs.decoder_attentions,
1747
+ cross_attentions=outputs.cross_attentions,
1748
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1749
+ encoder_hidden_states=outputs.encoder_hidden_states,
1750
+ encoder_attentions=outputs.encoder_attentions,
1751
+ )
1752
+
1753
+
1754
+ @add_start_docstrings(
1755
+ """
1756
+ MBart Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1757
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1758
+ """,
1759
+ MBART_START_DOCSTRING,
1760
+ )
1761
+ class FlaxMBartForQuestionAnswering(FlaxMBartPreTrainedModel):
1762
+ module_class = FlaxMBartForQuestionAnsweringModule
1763
+ dtype = jnp.float32
1764
+
1765
+
1766
+ append_call_sample_docstring(
1767
+ FlaxMBartForQuestionAnswering,
1768
+ _CHECKPOINT_FOR_DOC,
1769
+ FlaxSeq2SeqQuestionAnsweringModelOutput,
1770
+ _CONFIG_FOR_DOC,
1771
+ )
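For orientation, these are the public heads defined by the file above, together with a minimal loading sketch; the checkpoint name follows the docstring examples above, and heads without pretrained weights are randomly initialized:

```python
from transformers import (
    FlaxMBartForConditionalGeneration,
    FlaxMBartForQuestionAnswering,
    FlaxMBartForSequenceClassification,
    FlaxMBartModel,
)

# all heads share the same encoder-decoder backbone and can load the same checkpoint
model = FlaxMBartModel.from_pretrained("facebook/mbart-large-cc25")
```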
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/modeling_mbart.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/modeling_tf_mbart.py ADDED
@@ -0,0 +1,1573 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 MBart model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import random
21
+ from typing import Optional, Tuple, Union
22
+
23
+ import tensorflow as tf
24
+
25
+ from ...activations_tf import get_tf_activation
26
+ from ...modeling_tf_outputs import (
27
+ TFBaseModelOutput,
28
+ TFBaseModelOutputWithPastAndCrossAttentions,
29
+ TFSeq2SeqLMOutput,
30
+ TFSeq2SeqModelOutput,
31
+ )
32
+
33
+ # Public API
34
+ from ...modeling_tf_utils import (
35
+ TFCausalLanguageModelingLoss,
36
+ TFModelInputType,
37
+ TFPreTrainedModel,
38
+ keras,
39
+ keras_serializable,
40
+ unpack_inputs,
41
+ )
42
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
43
+ from ...utils import (
44
+ add_code_sample_docstrings,
45
+ add_end_docstrings,
46
+ add_start_docstrings,
47
+ add_start_docstrings_to_model_forward,
48
+ logging,
49
+ replace_return_docstrings,
50
+ )
51
+ from .configuration_mbart import MBartConfig
52
+
53
+
54
+ logger = logging.get_logger(__name__)
55
+
56
+ _CHECKPOINT_FOR_DOC = "facebook/mbart-large-cc25"
57
+ _CONFIG_FOR_DOC = "MBartConfig"
58
+
59
+
60
+ LARGE_NEGATIVE = -1e8
61
+
62
+
63
+ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int):
64
+ """
65
+ Shift input ids one token to the right, and wrap the last non-pad token (the <LID> token). Note that MBart does not
66
+ have a single `decoder_start_token_id` in contrast to other Bart-like models.
67
+ """
68
+ if pad_token_id is None:
69
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
70
+ # replace possible -100 values in labels by `pad_token_id`
71
+ input_ids = tf.where(
72
+ input_ids == -100, tf.fill(shape_list(input_ids), tf.cast(pad_token_id, input_ids.dtype)), input_ids
73
+ )
74
+ language_id_index = (
75
+ tf.reduce_sum(tf.cast(tf.math.not_equal(input_ids, pad_token_id), dtype=input_ids.dtype), axis=-1) - 1
76
+ )
77
+ language_id_index = tf.stack(
78
+ [tf.range(shape_list(input_ids)[0], dtype=input_ids.dtype), language_id_index], axis=-1
79
+ )
80
+ languages_ids = tf.gather_nd(input_ids, language_id_index)
81
+
82
+ shifted_input_ids = tf.concat([tf.expand_dims(languages_ids, axis=-1), input_ids[:, :-1]], axis=-1)
83
+
84
+ return shifted_input_ids
85
+
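+ # A minimal sketch of the rotation above (illustrative values only; 250004 stands in
+ # for a language-id token and the pad_token_id is assumed to be 1):
+ #
+ # labels = tf.constant([[47, 53, 2, 250004, 1, 1]])  # "tokens </s> <LID> <pad> <pad>"
+ # shift_tokens_right(labels, pad_token_id=1)
+ # # -> [[250004, 47, 53, 2, 250004, 1]]: the last non-pad token (<LID>) is wrapped to
+ # # the front and the final position is dropped, so the sequence length is unchanged.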
86
+
87
+ # Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
88
+ def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
89
+ """
90
+ Make the causal mask used for uni-directional (decoder) self-attention.
91
+ """
92
+ bsz = input_ids_shape[0]
93
+ tgt_len = input_ids_shape[1]
94
+ mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
95
+ mask_cond = tf.range(shape_list(mask)[-1])
96
+
97
+ mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
98
+
99
+ if past_key_values_length > 0:
100
+ mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
101
+
102
+ return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
103
+
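+ # Worked example of the mask above, assuming bsz=2, tgt_len=3 and no cached keys:
+ #
+ # _make_causal_mask([2, 3])
+ # # each (3, 3) slice looks like (0 = may attend, LARGE_NEGATIVE = blocked):
+ # # [[0, -1e8, -1e8],
+ # #  [0,    0, -1e8],
+ # #  [0,    0,    0]]
+ # # With past_key_values_length=k, k columns of zeros are prepended so cached positions
+ # # remain visible; the result has shape (bsz, 1, tgt_len, tgt_len + k).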
104
+
105
+ # Copied from transformers.models.bart.modeling_tf_bart._expand_mask
106
+ def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
107
+ """
108
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
109
+ """
110
+ src_len = shape_list(mask)[1]
111
+ tgt_len = tgt_len if tgt_len is not None else src_len
112
+ one_cst = tf.constant(1.0)
113
+ mask = tf.cast(mask, dtype=one_cst.dtype)
114
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
115
+
116
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
117
+
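+ # Sketch of the expansion above for a single sequence with one padded position:
+ #
+ # _expand_mask(tf.constant([[1.0, 1.0, 0.0]]))  # [bsz=1, src_len=3]
+ # # -> shape (1, 1, 3, 3); kept positions become 0.0 and the padded position becomes
+ # # LARGE_NEGATIVE, ready to be added to the attention logits before the softmax.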
118
+
119
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartLearnedPositionalEmbedding with Bart->MBart
120
+ class TFMBartLearnedPositionalEmbedding(keras.layers.Embedding):
121
+ """
122
+ This module learns positional embeddings up to a fixed maximum size.
123
+ """
124
+
125
+ def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs):
126
+ # MBart is set up so that if padding_idx is specified then offset the embedding ids by 2
127
+ # and adjust num_embeddings appropriately. Other models don't have this hack
128
+ self.offset = 2
129
+ super().__init__(num_embeddings + self.offset, embedding_dim, **kwargs)
130
+
131
+ def call(
132
+ self,
133
+ input_shape: Optional[tf.TensorShape] = None,
134
+ past_key_values_length: int = 0,
135
+ position_ids: tf.Tensor | None = None,
136
+ ):
137
+ """Input is expected to be of size [bsz x seqlen]."""
138
+ if position_ids is None:
139
+ seq_len = input_shape[1]
140
+ position_ids = tf.range(seq_len, delta=1, name="range")
141
+ position_ids += past_key_values_length
142
+
143
+ offset_dtype = position_ids.dtype if isinstance(position_ids, tf.Tensor) else tf.int32
144
+ return super().call(position_ids + tf.constant(self.offset, dtype=offset_dtype))
145
+
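+ # Note on the offset above: position ids are shifted by 2 before the lookup, so the
+ # table holds `num_embeddings + 2` rows and position 0 reads row 2. A hedged sketch,
+ # assuming 128 positions and d_model=16:
+ #
+ # pos_embed = TFMBartLearnedPositionalEmbedding(128, 16)
+ # pos_embed([1, 4])  # rows 2..5 of the table, shape (4, 16)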
146
+
147
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->MBart
148
+ class TFMBartAttention(keras.layers.Layer):
149
+ """Multi-headed attention from "Attention Is All You Need"""
150
+
151
+ def __init__(
152
+ self,
153
+ embed_dim: int,
154
+ num_heads: int,
155
+ dropout: float = 0.0,
156
+ is_decoder: bool = False,
157
+ bias: bool = True,
158
+ **kwargs,
159
+ ):
160
+ super().__init__(**kwargs)
161
+ self.embed_dim = embed_dim
162
+
163
+ self.num_heads = num_heads
164
+ self.dropout = keras.layers.Dropout(dropout)
165
+ self.head_dim = embed_dim // num_heads
166
+ if (self.head_dim * num_heads) != self.embed_dim:
167
+ raise ValueError(
168
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
169
+ f" and `num_heads`: {num_heads})."
170
+ )
171
+ self.scaling = self.head_dim**-0.5
172
+ self.is_decoder = is_decoder
173
+
174
+ self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
175
+ self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
176
+ self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
177
+ self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
178
+
179
+ def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
180
+ return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
181
+
182
+ def call(
183
+ self,
184
+ hidden_states: tf.Tensor,
185
+ key_value_states: tf.Tensor | None = None,
186
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
187
+ attention_mask: tf.Tensor | None = None,
188
+ layer_head_mask: tf.Tensor | None = None,
189
+ training: Optional[bool] = False,
190
+ ) -> Tuple[tf.Tensor, tf.Tensor | None]:
191
+ """Input shape: Batch x Time x Channel"""
192
+
193
+ # if key_value_states are provided this layer is used as a cross-attention layer
194
+ # for the decoder
195
+ is_cross_attention = key_value_states is not None
196
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
197
+
198
+ # get query proj
199
+ query_states = self.q_proj(hidden_states) * self.scaling
200
+ # get key, value proj
201
+ if is_cross_attention and past_key_value is not None:
202
+ # reuse k,v, cross_attentions
203
+ key_states = past_key_value[0]
204
+ value_states = past_key_value[1]
205
+ elif is_cross_attention:
206
+ # cross_attentions
207
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
208
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
209
+ elif past_key_value is not None:
210
+ # reuse k, v, self_attention
211
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
212
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
213
+ key_states = tf.concat([past_key_value[0], key_states], axis=2)
214
+ value_states = tf.concat([past_key_value[1], value_states], axis=2)
215
+ else:
216
+ # self_attention
217
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
218
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
219
+
220
+ if self.is_decoder:
221
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
222
+ # Further calls to cross_attention layer can then reuse all cross-attention
223
+ # key/value_states (first "if" case)
224
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
225
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
226
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
227
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
228
+ past_key_value = (key_states, value_states)
229
+
230
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
231
+ query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
232
+ key_states = tf.reshape(key_states, proj_shape)
233
+ value_states = tf.reshape(value_states, proj_shape)
234
+
235
+ src_len = shape_list(key_states)[1]
236
+ attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
237
+
238
+ tf.debugging.assert_equal(
239
+ shape_list(attn_weights),
240
+ [bsz * self.num_heads, tgt_len, src_len],
241
+ message=(
242
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
243
+ f" {shape_list(attn_weights)}"
244
+ ),
245
+ )
246
+
247
+ if attention_mask is not None:
248
+ tf.debugging.assert_equal(
249
+ shape_list(attention_mask),
250
+ [bsz, 1, tgt_len, src_len],
251
+ message=(
252
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
253
+ f" {shape_list(attention_mask)}"
254
+ ),
255
+ )
256
+
257
+ attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
258
+ attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
259
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
260
+
261
+ attn_weights = stable_softmax(attn_weights, axis=-1)
262
+
263
+ if layer_head_mask is not None:
264
+ tf.debugging.assert_equal(
265
+ shape_list(layer_head_mask),
266
+ [self.num_heads],
267
+ message=(
268
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
269
+ f" {shape_list(layer_head_mask)}"
270
+ ),
271
+ )
272
+
273
+ attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
274
+ attn_weights, (bsz, self.num_heads, tgt_len, src_len)
275
+ )
276
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
277
+
278
+ attn_probs = self.dropout(attn_weights, training=training)
279
+ attn_output = tf.matmul(attn_probs, value_states)
280
+
281
+ tf.debugging.assert_equal(
282
+ shape_list(attn_output),
283
+ [bsz * self.num_heads, tgt_len, self.head_dim],
284
+ message=(
285
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
286
+ f" {shape_list(attn_output)}"
287
+ ),
288
+ )
289
+
290
+ attn_output = tf.transpose(
291
+ tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
292
+ )
293
+ attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
294
+
295
+ attn_output = self.out_proj(attn_output)
296
+ attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
297
+
298
+ return attn_output, attn_weights, past_key_value
299
+
300
+ def build(self, input_shape=None):
301
+ if self.built:
302
+ return
303
+ self.built = True
304
+ if getattr(self, "k_proj", None) is not None:
305
+ with tf.name_scope(self.k_proj.name):
306
+ self.k_proj.build([None, None, self.embed_dim])
307
+ if getattr(self, "q_proj", None) is not None:
308
+ with tf.name_scope(self.q_proj.name):
309
+ self.q_proj.build([None, None, self.embed_dim])
310
+ if getattr(self, "v_proj", None) is not None:
311
+ with tf.name_scope(self.v_proj.name):
312
+ self.v_proj.build([None, None, self.embed_dim])
313
+ if getattr(self, "out_proj", None) is not None:
314
+ with tf.name_scope(self.out_proj.name):
315
+ self.out_proj.build([None, None, self.embed_dim])
316
+
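+ # Shape flow through the attention layer above (descriptive note):
+ # hidden_states (bsz, tgt_len, embed_dim)
+ #   -> q/k/v projections reshaped to (bsz * num_heads, seq_len, head_dim)
+ #   -> attn_weights (bsz * num_heads, tgt_len, src_len), normalized with stable_softmax
+ #   -> attn_output reshaped back to (bsz, tgt_len, embed_dim) and passed through out_proj.
+ # When `is_decoder=True`, (key_states, value_states) are returned as `past_key_value`
+ # so subsequent decoding steps can reuse them.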
317
+
318
+ class TFMBartEncoderLayer(keras.layers.Layer):
319
+ def __init__(self, config: MBartConfig, **kwargs):
320
+ super().__init__(**kwargs)
321
+ self.embed_dim = config.d_model
322
+ self.self_attn = TFMBartAttention(
323
+ self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
324
+ )
325
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
326
+ self.dropout = keras.layers.Dropout(config.dropout)
327
+ self.activation_fn = get_tf_activation(config.activation_function)
328
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
329
+ self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
330
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
331
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
332
+ self.config = config
333
+
334
+ def call(
335
+ self,
336
+ hidden_states: tf.Tensor,
337
+ attention_mask: tf.Tensor,
338
+ layer_head_mask: tf.Tensor,
339
+ training: Optional[bool] = False,
340
+ ):
341
+ """
342
+ Args:
343
+ hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
344
+ attention_mask (`tf.Tensor`): attention mask of size
345
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
346
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
347
+ *(encoder_attention_heads,)*
348
+ """
349
+ residual = hidden_states
350
+ hidden_states = self.self_attn_layer_norm(hidden_states)
351
+ hidden_states, self_attn_weights, _ = self.self_attn(
352
+ hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask
353
+ )
354
+
355
+ tf.debugging.assert_equal(
356
+ shape_list(hidden_states),
357
+ shape_list(residual),
358
+ message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
359
+ )
360
+
361
+ hidden_states = self.dropout(hidden_states, training=training)
362
+ hidden_states = residual + hidden_states
363
+
364
+ residual = hidden_states
365
+ hidden_states = self.final_layer_norm(hidden_states)
366
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
367
+ hidden_states = self.activation_dropout(hidden_states, training=training)
368
+ hidden_states = self.fc2(hidden_states)
369
+ hidden_states = self.dropout(hidden_states, training=training)
370
+ hidden_states = residual + hidden_states
371
+
372
+ return hidden_states, self_attn_weights
373
+
374
+ def build(self, input_shape=None):
375
+ if self.built:
376
+ return
377
+ self.built = True
378
+ if getattr(self, "self_attn", None) is not None:
379
+ with tf.name_scope(self.self_attn.name):
380
+ self.self_attn.build(None)
381
+ if getattr(self, "self_attn_layer_norm", None) is not None:
382
+ with tf.name_scope(self.self_attn_layer_norm.name):
383
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
384
+ if getattr(self, "fc1", None) is not None:
385
+ with tf.name_scope(self.fc1.name):
386
+ self.fc1.build([None, None, self.embed_dim])
387
+ if getattr(self, "fc2", None) is not None:
388
+ with tf.name_scope(self.fc2.name):
389
+ self.fc2.build([None, None, self.config.encoder_ffn_dim])
390
+ if getattr(self, "final_layer_norm", None) is not None:
391
+ with tf.name_scope(self.final_layer_norm.name):
392
+ self.final_layer_norm.build([None, None, self.embed_dim])
393
+
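+ # Note on the block above: MBart encoder layers use a pre-layer-norm residual pattern,
+ # i.e. hidden_states = residual + sublayer(layer_norm(hidden_states)) for both the
+ # self-attention and feed-forward sublayers; a final `layer_norm` is applied after the
+ # last layer in TFMBartEncoder.call below.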
394
+
395
+ class TFMBartDecoderLayer(keras.layers.Layer):
396
+ def __init__(self, config: MBartConfig, **kwargs):
397
+ super().__init__(**kwargs)
398
+ self.embed_dim = config.d_model
399
+ self.self_attn = TFMBartAttention(
400
+ embed_dim=self.embed_dim,
401
+ num_heads=config.decoder_attention_heads,
402
+ dropout=config.attention_dropout,
403
+ name="self_attn",
404
+ is_decoder=True,
405
+ )
406
+ self.dropout = keras.layers.Dropout(config.dropout)
407
+ self.activation_fn = get_tf_activation(config.activation_function)
408
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
409
+
410
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
411
+ self.encoder_attn = TFMBartAttention(
412
+ self.embed_dim,
413
+ config.decoder_attention_heads,
414
+ dropout=config.attention_dropout,
415
+ name="encoder_attn",
416
+ is_decoder=True,
417
+ )
418
+ self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
419
+ self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
420
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
421
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
422
+ self.config = config
423
+
424
+ def call(
425
+ self,
426
+ hidden_states: tf.Tensor,
427
+ attention_mask: tf.Tensor | None = None,
428
+ encoder_hidden_states: tf.Tensor | None = None,
429
+ encoder_attention_mask: tf.Tensor | None = None,
430
+ layer_head_mask: tf.Tensor | None = None,
431
+ cross_attn_layer_head_mask: tf.Tensor | None = None,
432
+ past_key_value: Tuple[tf.Tensor] | None = None,
433
+ training: Optional[bool] = False,
434
+ ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
435
+ """
436
+ Args:
437
+ hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
438
+ attention_mask (`tf.Tensor`): attention mask of size
439
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
440
+ encoder_hidden_states (`tf.Tensor`):
441
+ cross attention input to the layer of shape *(batch, seq_len, embed_dim)*
442
+ encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
443
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
444
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
445
+ *(decoder_attention_heads,)*
446
+ cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
447
+ *(decoder_attention_heads,)*
448
+ past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
449
+ """
450
+ residual = hidden_states
451
+ hidden_states = self.self_attn_layer_norm(hidden_states)
452
+
453
+ # Self Attention
454
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
455
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
456
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
457
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
458
+ hidden_states=hidden_states,
459
+ past_key_value=self_attn_past_key_value,
460
+ attention_mask=attention_mask,
461
+ layer_head_mask=layer_head_mask,
462
+ )
463
+ hidden_states = self.dropout(hidden_states, training=training)
464
+ hidden_states = residual + hidden_states
465
+
466
+ # Cross-Attention Block
467
+ cross_attn_present_key_value = None
468
+ cross_attn_weights = None
469
+ if encoder_hidden_states is not None:
470
+ residual = hidden_states
471
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
472
+
473
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
474
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
475
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
476
+ hidden_states=hidden_states,
477
+ key_value_states=encoder_hidden_states,
478
+ attention_mask=encoder_attention_mask,
479
+ layer_head_mask=cross_attn_layer_head_mask,
480
+ past_key_value=cross_attn_past_key_value,
481
+ )
482
+ hidden_states = self.dropout(hidden_states, training=training)
483
+ hidden_states = residual + hidden_states
484
+
485
+ # add cross-attn to positions 3,4 of present_key_value tuple
486
+ present_key_value = present_key_value + cross_attn_present_key_value
487
+
488
+ # Fully Connected
489
+ residual = hidden_states
490
+ hidden_states = self.final_layer_norm(hidden_states)
491
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
492
+ hidden_states = self.activation_dropout(hidden_states, training=training)
493
+ hidden_states = self.fc2(hidden_states)
494
+ hidden_states = self.dropout(hidden_states, training=training)
495
+ hidden_states = residual + hidden_states
496
+
497
+ return (
498
+ hidden_states,
499
+ self_attn_weights,
500
+ cross_attn_weights,
501
+ present_key_value,
502
+ )
503
+
504
+ def build(self, input_shape=None):
505
+ if self.built:
506
+ return
507
+ self.built = True
508
+ if getattr(self, "self_attn", None) is not None:
509
+ with tf.name_scope(self.self_attn.name):
510
+ self.self_attn.build(None)
511
+ if getattr(self, "self_attn_layer_norm", None) is not None:
512
+ with tf.name_scope(self.self_attn_layer_norm.name):
513
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
514
+ if getattr(self, "encoder_attn", None) is not None:
515
+ with tf.name_scope(self.encoder_attn.name):
516
+ self.encoder_attn.build(None)
517
+ if getattr(self, "encoder_attn_layer_norm", None) is not None:
518
+ with tf.name_scope(self.encoder_attn_layer_norm.name):
519
+ self.encoder_attn_layer_norm.build([None, None, self.embed_dim])
520
+ if getattr(self, "fc1", None) is not None:
521
+ with tf.name_scope(self.fc1.name):
522
+ self.fc1.build([None, None, self.embed_dim])
523
+ if getattr(self, "fc2", None) is not None:
524
+ with tf.name_scope(self.fc2.name):
525
+ self.fc2.build([None, None, self.config.decoder_ffn_dim])
526
+ if getattr(self, "final_layer_norm", None) is not None:
527
+ with tf.name_scope(self.final_layer_norm.name):
528
+ self.final_layer_norm.build([None, None, self.embed_dim])
529
+
530
+
531
+ class TFMBartPreTrainedModel(TFPreTrainedModel):
532
+ config_class = MBartConfig
533
+ base_model_prefix = "model"
534
+
535
+
536
+ MBART_START_DOCSTRING = r"""
537
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
538
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
539
+ etc.)
540
+
541
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
542
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
543
+ behavior.
544
+
545
+ <Tip>
546
+
547
+ TensorFlow models and layers in `transformers` accept two formats as input:
548
+
549
+ - having all inputs as keyword arguments (like PyTorch models), or
550
+ - having all inputs as a list, tuple or dict in the first positional argument.
551
+
552
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
553
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
554
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
555
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
556
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
557
+ positional argument:
558
+
559
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
560
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
561
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
562
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
563
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
564
+
565
+ Note that when creating models and layers with
566
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
567
+ about any of this, as you can just pass inputs like you would to any other Python function!
568
+
569
+ </Tip>
570
+
571
+ Args:
572
+ config ([`MBartConfig`]): Model configuration class with all the parameters of the model.
573
+ Initializing with a config file does not load the weights associated with the model, only the
574
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
575
+ """
576
+
577
+ MBART_INPUTS_DOCSTRING = r"""
578
+ Args:
579
+ input_ids (`tf.Tensor` of shape `({0})`):
580
+ Indices of input sequence tokens in the vocabulary.
581
+
582
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
583
+ [`PreTrainedTokenizer.__call__`] for details.
584
+
585
+ [What are input IDs?](../glossary#input-ids)
586
+ attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
587
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
588
+
589
+ - 1 for tokens that are **not masked**,
590
+ - 0 for tokens that are **masked**.
591
+
592
+ [What are attention masks?](../glossary#attention-mask)
593
+ decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
594
+ Indices of decoder input sequence tokens in the vocabulary.
595
+
596
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
597
+ [`PreTrainedTokenizer.__call__`] for details.
598
+
599
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
600
+
601
+ MBart uses a specific language id token as the starting token for `decoder_input_ids` generation that
602
+ varies according to source and target language, *e.g.* 250004 for *en_XX*, and 250003 for *de_DE*. If
603
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
604
+ `past_key_values`).
605
+
606
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
607
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
608
+ for denoising pre-training following the paper.
609
+ decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
610
+ Will be made by default and ignores pad tokens. It is not recommended to set this for most use cases.
611
+ decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
612
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
613
+ range `[0, config.max_position_embeddings - 1]`.
614
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
615
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
616
+
617
+ - 1 indicates the head is **not masked**,
618
+ - 0 indicates the head is **masked**.
619
+
620
+ decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
621
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
622
+
623
+ - 1 indicates the head is **not masked**,
624
+ - 0 indicates the head is **masked**.
625
+
626
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
627
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
628
+
629
+ - 1 indicates the head is **not masked**,
630
+ - 0 indicates the head is **masked**.
631
+
632
+ encoder_outputs (`tf.FloatTensor`, *optional*):
633
+ Sequence of hidden states at the output of the last layer of the encoder, of shape
634
+ `(batch_size, sequence_length, hidden_size)`. Used in the cross-attention of the decoder.
635
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
636
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
637
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
638
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
639
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
640
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
641
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
642
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
643
+ than the model's internal embedding lookup matrix.
644
+ use_cache (`bool`, *optional*, defaults to `True`):
645
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
646
+ `past_key_values`). Set to `False` during training, `True` during generation.
647
+ output_attentions (`bool`, *optional*):
648
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
649
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
650
+ config will be used instead.
651
+ output_hidden_states (`bool`, *optional*):
652
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
653
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
654
+ used instead.
655
+ return_dict (`bool`, *optional*):
656
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
657
+ eager mode, in graph mode the value will always be set to True.
658
+ training (`bool`, *optional*, defaults to `False`):
659
+ Whether or not to use the model in training mode (some modules like dropout modules have different
660
+ behaviors between training and evaluation).
661
+ """
662
+
663
+ MBART_GENERATION_EXAMPLE = r"""
664
+ Translation example:
665
+
666
+ ```python
667
+ >>> from transformers import AutoTokenizer, TFMBartForConditionalGeneration
668
+
669
+ >>> model = TFMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
670
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
671
+
672
+ >>> example_english_phrase = "42 is the answer"
673
+ >>> inputs = tokenizer(example_english_phrase, return_tensors="tf")
674
+
675
+ >>> # Translate
676
+ >>> generated_ids = model.generate(**inputs, num_beams=4, max_length=5)
677
+ >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
678
+ '42 este răspuns'
679
+ ```
680
+
681
+ Mask filling example:
682
+
683
+ ```python
684
+ >>> from transformers import AutoTokenizer, TFMBartForConditionalGeneration
685
+ >>> import tensorflow as tf
686
+
687
+ >>> model = TFMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
688
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
689
+
690
+ >>> # de_DE is the language symbol id <LID> for German
691
+ >>> TXT = "</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. </s> de_DE"
692
+
693
+ >>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors="tf")["input_ids"]
694
+ >>> logits = model(input_ids).logits
695
+
696
+ >>> masked_index = tf.where(input_ids[0] == tokenizer.mask_token_id)[0, 0]
697
+ >>> probs = tf.nn.softmax(logits[0, masked_index], axis=0)
698
+ >>> values, predictions = tf.math.top_k(probs, 5)
699
+
700
+ >>> tokenizer.decode(predictions).split()
701
+ ['nett', 'sehr', 'ganz', 'nicht', 'so']
702
+ ```
703
+ """
704
+
705
+
706
+ @keras_serializable
707
+ class TFMBartEncoder(keras.layers.Layer):
708
+ config_class = MBartConfig
709
+ """
710
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
711
+ [`TFMBartEncoderLayer`].
712
+
713
+ Args:
714
+ config: MBartConfig
715
+ """
716
+
717
+ def __init__(self, config: MBartConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
718
+ super().__init__(**kwargs)
719
+ self.config = config
720
+ self.dropout = keras.layers.Dropout(config.dropout)
721
+ self.layerdrop = config.encoder_layerdrop
722
+ self.padding_idx = config.pad_token_id
723
+ self.max_source_positions = config.max_position_embeddings
724
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
725
+
726
+ self.embed_tokens = embed_tokens
727
+ self.embed_positions = TFMBartLearnedPositionalEmbedding(
728
+ config.max_position_embeddings,
729
+ config.d_model,
730
+ name="embed_positions",
731
+ )
732
+ self.layers = [TFMBartEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
733
+ self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
734
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
735
+ self.embed_dim = config.d_model
736
+
737
+ def get_embed_tokens(self):
738
+ return self.embed_tokens
739
+
740
+ def set_embed_tokens(self, embed_tokens):
741
+ self.embed_tokens = embed_tokens
742
+
743
+ @unpack_inputs
744
+ def call(
745
+ self,
746
+ input_ids: TFModelInputType | None = None,
747
+ inputs_embeds: tf.Tensor | None = None,
748
+ attention_mask: tf.Tensor | None = None,
749
+ head_mask: tf.Tensor | None = None,
750
+ output_attentions: Optional[bool] = None,
751
+ output_hidden_states: Optional[bool] = None,
752
+ return_dict: Optional[bool] = None,
753
+ training: Optional[bool] = False,
754
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
755
+ """
756
+ Args:
757
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
758
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
759
+ provide it.
760
+
761
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
762
+ [`PreTrainedTokenizer.__call__`] for details.
763
+
764
+ [What are input IDs?](../glossary#input-ids)
765
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
766
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
767
+
768
+ - 1 for tokens that are **not masked**,
769
+ - 0 for tokens that are **masked**.
770
+
771
+ [What are attention masks?](../glossary#attention-mask)
772
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
773
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
774
+
775
+ - 1 indicates the head is **not masked**,
776
+ - 0 indicates the head is **masked**.
777
+
778
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
779
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
780
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
781
+ than the model's internal embedding lookup matrix.
782
+ output_attentions (`bool`, *optional*):
783
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
784
+ returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
785
+ in the config will be used instead.
786
+ output_hidden_states (`bool`, *optional*):
787
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
788
+ for more detail. This argument can be used only in eager mode, in graph mode the value in the config
789
+ will be used instead.
790
+ return_dict (`bool`, *optional*):
791
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
792
+ in eager mode, in graph mode the value will always be set to True.
793
+ training (`bool`, *optional*, defaults to `False`):
794
+ Whether or not to use the model in training mode (some modules like dropout modules have different
795
+ behaviors between training and evaluation).
796
+ """
797
+
798
+ if input_ids is not None and inputs_embeds is not None:
799
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
800
+ elif input_ids is not None:
801
+ input_shape = shape_list(input_ids)
802
+ elif inputs_embeds is not None:
803
+ input_shape = shape_list(inputs_embeds)[:-1]
804
+ else:
805
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
806
+
807
+ if inputs_embeds is None:
808
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
809
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
810
+
811
+ embed_pos = self.embed_positions(input_shape)
812
+ hidden_states = inputs_embeds + embed_pos
813
+ hidden_states = self.layernorm_embedding(hidden_states)
814
+ hidden_states = self.dropout(hidden_states, training=training)
815
+
816
+ # check attention mask and invert
817
+ if attention_mask is not None:
818
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
819
+ attention_mask = _expand_mask(attention_mask)
820
+ else:
821
+ attention_mask = None
822
+
823
+ encoder_states = () if output_hidden_states else None
824
+ all_attentions = () if output_attentions else None
825
+
826
+ # check if head_mask has a correct number of layers specified if desired
827
+ if head_mask is not None:
828
+ tf.debugging.assert_equal(
829
+ shape_list(head_mask)[0],
830
+ len(self.layers),
831
+ message=(
832
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
833
+ f" {shape_list(head_mask)[0]}."
834
+ ),
835
+ )
836
+
837
+ # encoder layers
838
+ for idx, encoder_layer in enumerate(self.layers):
839
+ if output_hidden_states:
840
+ encoder_states = encoder_states + (hidden_states,)
841
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
842
+ dropout_probability = random.uniform(0, 1)
843
+ if training and (dropout_probability < self.layerdrop): # skip the layer
844
+ continue
845
+
846
+ hidden_states, attn = encoder_layer(
847
+ hidden_states,
848
+ attention_mask,
849
+ head_mask[idx] if head_mask is not None else None,
850
+ )
851
+
852
+ if output_attentions:
853
+ all_attentions += (attn,)
854
+
855
+ hidden_states = self.layer_norm(hidden_states)
856
+
857
+ if output_hidden_states:
858
+ encoder_states = encoder_states + (hidden_states,)
859
+
860
+ if not return_dict:
861
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
862
+ return TFBaseModelOutput(
863
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
864
+ )
865
+
866
+ def build(self, input_shape=None):
867
+ if self.built:
868
+ return
869
+ self.built = True
870
+ if getattr(self, "embed_positions", None) is not None:
871
+ with tf.name_scope(self.embed_positions.name):
872
+ self.embed_positions.build(None)
873
+ if getattr(self, "layernorm_embedding", None) is not None:
874
+ with tf.name_scope(self.layernorm_embedding.name):
875
+ self.layernorm_embedding.build([None, None, self.embed_dim])
876
+ if getattr(self, "layer_norm", None) is not None:
877
+ with tf.name_scope(self.layer_norm.name):
878
+ self.layer_norm.build([None, None, self.config.d_model])
879
+ if getattr(self, "layers", None) is not None:
880
+ for layer in self.layers:
881
+ with tf.name_scope(layer.name):
882
+ layer.build(None)
883
+
884
+
885
+ @keras_serializable
886
+ class TFMBartDecoder(keras.layers.Layer):
887
+ config_class = MBartConfig
888
+ """
889
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFMBartDecoderLayer`]
890
+
891
+ Args:
892
+ config: MBartConfig
893
+ embed_tokens: output embedding
894
+ """
895
+
896
+ def __init__(self, config: MBartConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
897
+ super().__init__(**kwargs)
898
+ self.config = config
899
+ self.padding_idx = config.pad_token_id
900
+ self.embed_tokens = embed_tokens
901
+ self.layerdrop = config.decoder_layerdrop
902
+ self.embed_positions = TFMBartLearnedPositionalEmbedding(
903
+ config.max_position_embeddings,
904
+ config.d_model,
905
+ name="embed_positions",
906
+ )
907
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
908
+ self.layers = [TFMBartDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
909
+ self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
910
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
911
+
912
+ self.dropout = keras.layers.Dropout(config.dropout)
913
+
914
+ def get_embed_tokens(self):
915
+ return self.embed_tokens
916
+
917
+ def set_embed_tokens(self, embed_tokens):
918
+ self.embed_tokens = embed_tokens
919
+
920
+ @unpack_inputs
921
+ def call(
922
+ self,
923
+ input_ids: TFModelInputType = None,
924
+ inputs_embeds: tf.Tensor | None = None,
925
+ attention_mask: tf.Tensor | None = None,
926
+ position_ids: tf.Tensor | None = None,
927
+ encoder_hidden_states: tf.Tensor | None = None,
928
+ encoder_attention_mask: tf.Tensor | None = None,
929
+ head_mask: tf.Tensor | None = None,
930
+ cross_attn_head_mask: tf.Tensor | None = None,
931
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None = None,
932
+ use_cache: Optional[bool] = None,
933
+ output_attentions: Optional[bool] = None,
934
+ output_hidden_states: Optional[bool] = None,
935
+ return_dict: Optional[bool] = None,
936
+ training: Optional[bool] = False,
937
+ ) -> Union[
938
+ TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]
939
+ ]:
940
+ r"""
941
+ Args:
942
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
943
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
944
+ provide it.
945
+
946
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
947
+ [`PreTrainedTokenizer.__call__`] for details.
948
+
949
+ [What are input IDs?](../glossary#input-ids)
950
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
951
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
952
+
953
+ - 1 for tokens that are **not masked**,
954
+ - 0 for tokens that are **masked**.
955
+
956
+ [What are attention masks?](../glossary#attention-mask)
957
+ position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
958
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
959
+ range `[0, config.max_position_embeddings - 1]`.
960
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
961
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
962
+ of the decoder.
963
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
964
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
965
+ selected in `[0, 1]`:
966
+
967
+ - 1 for tokens that are **not masked**,
968
+ - 0 for tokens that are **masked**.
969
+
970
+ [What are attention masks?](../glossary#attention-mask)
971
+ head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
972
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
973
+
974
+ - 1 indicates the head is **not masked**,
975
+ - 0 indicates the head is **masked**.
976
+
977
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
978
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
979
+
980
+ - 1 indicates the head is **not masked**,
981
+ - 0 indicates the head is **masked**.
982
+
983
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
984
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
985
+ decoding.
986
+
987
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
988
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
989
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
990
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
991
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
992
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
993
+ than the model's internal embedding lookup matrix.
994
+ output_attentions (`bool`, *optional*):
995
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
996
+ returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
997
+ in the config will be used instead.
998
+ output_hidden_states (`bool`, *optional*):
999
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
1000
+ for more detail. This argument can be used only in eager mode, in graph mode the value in the config
1001
+ will be used instead.
1002
+ return_dict (`bool`, *optional*):
1003
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
1004
+ in eager mode, in graph mode the value will always be set to True.
1005
+ training (`bool`, *optional*, defaults to `False`):
1006
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1007
+ behaviors between training and evaluation).
1008
+ """
1009
+
1010
+ if input_ids is not None and inputs_embeds is not None:
1011
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
1012
+ elif input_ids is not None:
1013
+ input_shape = shape_list(input_ids)
1014
+ elif inputs_embeds is not None:
1015
+ input_shape = shape_list(inputs_embeds)[:-1]
1016
+ else:
1017
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
1018
+
1019
+ past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0
1020
+
1021
+ # embed positions
1022
+ if position_ids is None:
1023
+ positions = self.embed_positions(input_shape, past_key_values_length)
1024
+ else:
1025
+ positions = self.embed_positions(input_shape, position_ids=position_ids)
1026
+
1027
+ if inputs_embeds is None:
1028
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
1029
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
1030
+
1031
+ hidden_states = inputs_embeds
1032
+
1033
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1034
+ if input_shape[-1] > 1:
1035
+ combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
1036
+ else:
1037
+ combined_attention_mask = _expand_mask(
1038
+ tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
1039
+ )
1040
+
1041
+ if attention_mask is not None:
1042
+ combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])
1043
+
1044
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
1045
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1046
+ encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])
1047
+
1048
+ hidden_states = self.layernorm_embedding(hidden_states + positions)
1049
+ hidden_states = self.dropout(hidden_states, training=training)
1050
+
1051
+ # decoder layers
1052
+ all_hidden_states = () if output_hidden_states else None
1053
+ all_self_attns = () if output_attentions else None
1054
+ all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None
1055
+ present_key_values = () if use_cache else None
1056
+
1057
+ # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
1058
+ for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]:
1059
+ if attn_mask is not None:
1060
+ tf.debugging.assert_equal(
1061
+ shape_list(attn_mask)[0],
1062
+ len(self.layers),
1063
+ message=(
1064
+ f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
1065
+ f" {shape_list(attn_mask)[0]}."
1066
+ ),
1067
+ )
1068
+
1069
+ for idx, decoder_layer in enumerate(self.layers):
1070
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1071
+ if output_hidden_states:
1072
+ all_hidden_states += (hidden_states,)
1073
+ dropout_probability = random.uniform(0, 1)
1074
+
1075
+ if training and (dropout_probability < self.layerdrop):
1076
+ continue
1077
+
1078
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
1079
+
1080
+ hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
1081
+ hidden_states,
1082
+ attention_mask=combined_attention_mask,
1083
+ encoder_hidden_states=encoder_hidden_states,
1084
+ encoder_attention_mask=encoder_attention_mask,
1085
+ layer_head_mask=head_mask[idx] if head_mask is not None else None,
1086
+ cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
1087
+ past_key_value=past_key_value,
1088
+ )
1089
+
1090
+ if use_cache:
1091
+ present_key_values += (present_key_value,)
1092
+
1093
+ if output_attentions:
1094
+ all_self_attns += (layer_self_attn,)
1095
+
1096
+ if encoder_hidden_states is not None:
1097
+ all_cross_attns += (layer_cross_attn,)
1098
+
1099
+ hidden_states = self.layer_norm(hidden_states)
1100
+
1101
+ if output_hidden_states:
1102
+ all_hidden_states += (hidden_states,)
1103
+
1104
+ if not return_dict:
1105
+ return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns
1106
+ else:
1107
+ return TFBaseModelOutputWithPastAndCrossAttentions(
1108
+ last_hidden_state=hidden_states,
1109
+ past_key_values=present_key_values,
1110
+ hidden_states=all_hidden_states,
1111
+ attentions=all_self_attns,
1112
+ cross_attentions=all_cross_attns,
1113
+ )
1114
+
1115
+ def build(self, input_shape=None):
1116
+ if self.built:
1117
+ return
1118
+ self.built = True
1119
+ if getattr(self, "embed_positions", None) is not None:
1120
+ with tf.name_scope(self.embed_positions.name):
1121
+ self.embed_positions.build(None)
1122
+ if getattr(self, "layernorm_embedding", None) is not None:
1123
+ with tf.name_scope(self.layernorm_embedding.name):
1124
+ self.layernorm_embedding.build([None, None, self.config.d_model])
1125
+ if getattr(self, "layer_norm", None) is not None:
1126
+ with tf.name_scope(self.layer_norm.name):
1127
+ self.layer_norm.build([None, None, self.config.d_model])
1128
+ if getattr(self, "layers", None) is not None:
1129
+ for layer in self.layers:
1130
+ with tf.name_scope(layer.name):
1131
+ layer.build(None)
1132
+
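+ # Note on masking in the decoder above: a causal mask is built whenever the target
+ # length is greater than 1 (via _make_causal_mask), any padding mask passed through
+ # `attention_mask` is added on top of it, and `encoder_attention_mask` is expanded
+ # separately for the cross-attention blocks.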
1133
+
1134
+ @keras_serializable
1135
+ class TFMBartMainLayer(keras.layers.Layer):
1136
+ config_class = MBartConfig
1137
+
1138
+ def __init__(self, config: MBartConfig, **kwargs):
1139
+ super().__init__(**kwargs)
1140
+
1141
+ self.config = config
1142
+ self.shared = keras.layers.Embedding(
1143
+ input_dim=config.vocab_size,
1144
+ output_dim=config.d_model,
1145
+ embeddings_initializer=keras.initializers.TruncatedNormal(stddev=self.config.init_std),
1146
+ name="model.shared",
1147
+ )
1148
+ # Additional attribute to specify the expected name scope of the layer (for loading/storing weights)
1149
+ self.shared.load_weight_prefix = "model.shared"
1150
+
1151
+ self.encoder = TFMBartEncoder(config, self.shared, name="encoder")
1152
+ self.decoder = TFMBartDecoder(config, self.shared, name="decoder")
1153
+
1154
+ def get_input_embeddings(self):
1155
+ return self.shared
1156
+
1157
+ def set_input_embeddings(self, new_embeddings):
1158
+ self.shared = new_embeddings
1159
+ self.encoder.embed_tokens = self.shared
1160
+ self.decoder.embed_tokens = self.shared
1161
+
1162
+ @unpack_inputs
1163
+ def call(
1164
+ self,
1165
+ input_ids: TFModelInputType = None,
1166
+ attention_mask: tf.Tensor | None = None,
1167
+ decoder_input_ids: tf.Tensor | None = None,
1168
+ decoder_attention_mask: tf.Tensor | None = None,
1169
+ decoder_position_ids: tf.Tensor | None = None,
1170
+ head_mask: tf.Tensor | None = None,
1171
+ decoder_head_mask: tf.Tensor | None = None,
1172
+ cross_attn_head_mask: tf.Tensor | None = None,
1173
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
1174
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None = None,
1175
+ inputs_embeds: tf.Tensor | None = None,
1176
+ decoder_inputs_embeds: tf.Tensor | None = None,
1177
+ use_cache: Optional[bool] = None,
1178
+ output_attentions: Optional[bool] = None,
1179
+ output_hidden_states: Optional[bool] = None,
1180
+ return_dict: Optional[bool] = None,
1181
+ training: Optional[bool] = False,
1182
+ **kwargs,
1183
+ ) -> Union[TFSeq2SeqModelOutput, tf.Tensor]:
1184
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1185
+ use_cache = False
1186
+
1187
+ output_hidden_states = (
1188
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1189
+ )
1190
+
1191
+ if decoder_input_ids is None and input_ids is not None:
1192
+ decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id)
1193
+
1194
+ if encoder_outputs is None:
1195
+ encoder_outputs = self.encoder(
1196
+ input_ids=input_ids,
1197
+ attention_mask=attention_mask,
1198
+ head_mask=head_mask,
1199
+ inputs_embeds=inputs_embeds,
1200
+ output_attentions=output_attentions,
1201
+ output_hidden_states=output_hidden_states,
1202
+ return_dict=return_dict,
1203
+ training=training,
1204
+ )
1205
+ # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
1206
+ elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput):
1207
+ encoder_outputs = TFBaseModelOutput(
1208
+ last_hidden_state=encoder_outputs[0],
1209
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1210
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1211
+ )
1212
+ # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
1213
+ elif not return_dict and not isinstance(encoder_outputs, tuple):
1214
+ encoder_outputs = encoder_outputs.to_tuple()
1215
+
1216
+ decoder_outputs = self.decoder(
1217
+ decoder_input_ids,
1218
+ attention_mask=decoder_attention_mask,
1219
+ position_ids=decoder_position_ids,
1220
+ encoder_hidden_states=encoder_outputs[0],
1221
+ encoder_attention_mask=attention_mask,
1222
+ head_mask=decoder_head_mask,
1223
+ cross_attn_head_mask=cross_attn_head_mask,
1224
+ past_key_values=past_key_values,
1225
+ inputs_embeds=decoder_inputs_embeds,
1226
+ use_cache=use_cache,
1227
+ output_attentions=output_attentions,
1228
+ output_hidden_states=output_hidden_states,
1229
+ return_dict=return_dict,
1230
+ training=training,
1231
+ )
1232
+
1233
+ if not return_dict:
1234
+ return decoder_outputs + encoder_outputs
1235
+
1236
+ return TFSeq2SeqModelOutput(
1237
+ last_hidden_state=decoder_outputs.last_hidden_state,
1238
+ past_key_values=decoder_outputs.past_key_values,
1239
+ decoder_hidden_states=decoder_outputs.hidden_states,
1240
+ decoder_attentions=decoder_outputs.attentions,
1241
+ cross_attentions=decoder_outputs.cross_attentions,
1242
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1243
+ encoder_hidden_states=encoder_outputs.hidden_states,
1244
+ encoder_attentions=encoder_outputs.attentions,
1245
+ )
1246
+
1247
+ def build(self, input_shape=None):
1248
+ if self.built:
1249
+ return
1250
+ self.built = True
1251
+ # The shared/tied weights expect to be in the model base namespace
1252
+ # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the root namespace rather than
1253
+ # the current one.
1254
+ with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"):
1255
+ self.shared.build(None)
1256
+ if getattr(self, "encoder", None) is not None:
1257
+ with tf.name_scope(self.encoder.name):
1258
+ self.encoder.build(None)
1259
+ if getattr(self, "decoder", None) is not None:
1260
+ with tf.name_scope(self.decoder.name):
1261
+ self.decoder.build(None)
1262
+
1263
+
1264
+ @add_start_docstrings(
1265
+ "The bare MBART Model outputting raw hidden-states without any specific head on top.",
1266
+ MBART_START_DOCSTRING,
1267
+ )
1268
+ class TFMBartModel(TFMBartPreTrainedModel):
1269
+ def __init__(self, config: MBartConfig, *inputs, **kwargs):
1270
+ super().__init__(config, *inputs, **kwargs)
1271
+
1272
+ self.model = TFMBartMainLayer(config, name="model")
1273
+
1274
+ def get_encoder(self):
1275
+ return self.model.encoder
1276
+
1277
+ def get_decoder(self):
1278
+ return self.model.decoder
1279
+
1280
+ @unpack_inputs
1281
+ @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1282
+ @add_code_sample_docstrings(
1283
+ checkpoint=_CHECKPOINT_FOR_DOC,
1284
+ output_type=TFSeq2SeqModelOutput,
1285
+ config_class=_CONFIG_FOR_DOC,
1286
+ )
1287
+ def call(
1288
+ self,
1289
+ input_ids: TFModelInputType = None,
1290
+ attention_mask: tf.Tensor | None = None,
1291
+ decoder_input_ids: tf.Tensor | None = None,
1292
+ decoder_attention_mask: tf.Tensor | None = None,
1293
+ decoder_position_ids: tf.Tensor | None = None,
1294
+ head_mask: tf.Tensor | None = None,
1295
+ decoder_head_mask: tf.Tensor | None = None,
1296
+ cross_attn_head_mask: tf.Tensor | None = None,
1297
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
1298
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None = None,
1299
+ inputs_embeds: tf.Tensor | None = None,
1300
+ decoder_inputs_embeds: tf.Tensor | None = None,
1301
+ use_cache: Optional[bool] = None,
1302
+ output_attentions: Optional[bool] = None,
1303
+ output_hidden_states: Optional[bool] = None,
1304
+ return_dict: Optional[bool] = None,
1305
+ training: Optional[bool] = False,
1306
+ **kwargs,
1307
+ ) -> Union[TFSeq2SeqModelOutput, Tuple[tf.Tensor]]:
1308
+ outputs = self.model(
1309
+ input_ids=input_ids,
1310
+ attention_mask=attention_mask,
1311
+ decoder_input_ids=decoder_input_ids,
1312
+ decoder_attention_mask=decoder_attention_mask,
1313
+ decoder_position_ids=decoder_position_ids,
1314
+ head_mask=head_mask,
1315
+ decoder_head_mask=decoder_head_mask,
1316
+ cross_attn_head_mask=cross_attn_head_mask,
1317
+ encoder_outputs=encoder_outputs,
1318
+ past_key_values=past_key_values,
1319
+ inputs_embeds=inputs_embeds,
1320
+ decoder_inputs_embeds=decoder_inputs_embeds,
1321
+ use_cache=use_cache,
1322
+ output_attentions=output_attentions,
1323
+ output_hidden_states=output_hidden_states,
1324
+ return_dict=return_dict,
1325
+ training=training,
1326
+ )
1327
+
1328
+ return outputs
1329
+
1330
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output
1331
+ def serving_output(self, output):
1332
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
1333
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
1334
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
1335
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
1336
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
1337
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
1338
+
1339
+ return TFSeq2SeqModelOutput(
1340
+ last_hidden_state=output.last_hidden_state,
1341
+ past_key_values=pkv,
1342
+ decoder_hidden_states=dec_hs,
1343
+ decoder_attentions=dec_attns,
1344
+ cross_attentions=cross_attns,
1345
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
1346
+ encoder_hidden_states=enc_hs,
1347
+ encoder_attentions=enc_attns,
1348
+ )
1349
+
1350
+ def build(self, input_shape=None):
1351
+ if self.built:
1352
+ return
1353
+ self.built = True
1354
+ if getattr(self, "model", None) is not None:
1355
+ with tf.name_scope(self.model.name):
1356
+ self.model.build(None)
1357
+
1358
+
1359
+ # Copied from transformers.models.bart.modeling_tf_bart.BiasLayer
1360
+ class BiasLayer(keras.layers.Layer):
1361
+ """
1362
+ Bias as a layer. It is used for serialization purposes: `keras.Model.save_weights` stores on a per-layer basis,
1363
+ so all weights have to be registered in a layer.
1364
+ """
1365
+
1366
+ def __init__(self, shape, initializer, trainable, name, **kwargs):
1367
+ super().__init__(name=name, **kwargs)
1368
+ # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of
1369
+ # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see:
1370
+ # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214
1371
+ self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable)
1372
+
1373
+ def call(self, x):
1374
+ return x + self.bias
1375
+
1376
+
1377
+ @add_start_docstrings(
1378
+ "The MBART Model with a language modeling head. Can be used for summarization, after fine-tuning the pretrained models.",
1379
+ MBART_START_DOCSTRING,
1380
+ )
1381
+ class TFMBartForConditionalGeneration(TFMBartPreTrainedModel, TFCausalLanguageModelingLoss):
1382
+ _keys_to_ignore_on_load_unexpected = [
1383
+ r"model.encoder.embed_tokens.weight",
1384
+ r"model.decoder.embed_tokens.weight",
1385
+ ]
1386
+
1387
+ def __init__(self, config, *inputs, **kwargs):
1388
+ super().__init__(config, *inputs, **kwargs)
1389
+ self.model = TFMBartMainLayer(config, name="model")
1390
+ self.use_cache = config.use_cache
1391
+ # final_logits_bias is registered as a buffer in pytorch, so not trainable for the sake of consistency.
1392
+ self.bias_layer = BiasLayer(
1393
+ name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
1394
+ )
1395
+
1396
+ def get_decoder(self):
1397
+ return self.model.decoder
1398
+
1399
+ def get_encoder(self):
1400
+ return self.model.encoder
1401
+
1402
+ def get_output_embeddings(self):
1403
+ return self.get_input_embeddings()
1404
+
1405
+ def set_output_embeddings(self, value):
1406
+ self.set_input_embeddings(value)
1407
+
1408
+ def get_bias(self):
1409
+ return {"final_logits_bias": self.bias_layer.bias}
1410
+
1411
+ def set_bias(self, value):
1412
+ # Replaces the existing layers containing bias for correct (de)serialization.
1413
+ vocab_size = value["final_logits_bias"].shape[-1]
1414
+ self.bias_layer = BiasLayer(
1415
+ name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False
1416
+ )
1417
+ self.bias_layer.bias.assign(value["final_logits_bias"])
1418
+
1419
+ @unpack_inputs
1420
+ @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
1421
+ @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
1422
+ @add_end_docstrings(MBART_GENERATION_EXAMPLE)
1423
+ def call(
1424
+ self,
1425
+ input_ids: TFModelInputType = None,
1426
+ attention_mask: tf.Tensor | None = None,
1427
+ decoder_input_ids: tf.Tensor | None = None,
1428
+ decoder_attention_mask: tf.Tensor | None = None,
1429
+ decoder_position_ids: tf.Tensor | None = None,
1430
+ head_mask: tf.Tensor | None = None,
1431
+ decoder_head_mask: tf.Tensor | None = None,
1432
+ cross_attn_head_mask: tf.Tensor | None = None,
1433
+ encoder_outputs: Optional[TFBaseModelOutput] = None,
1434
+ past_key_values: Tuple[Tuple[tf.Tensor]] = None,
1435
+ inputs_embeds: tf.Tensor | None = None,
1436
+ decoder_inputs_embeds: tf.Tensor | None = None,
1437
+ use_cache: Optional[bool] = None,
1438
+ output_attentions: Optional[bool] = None,
1439
+ output_hidden_states: Optional[bool] = None,
1440
+ return_dict: Optional[bool] = None,
1441
+ labels: tf.Tensor | None = None,
1442
+ training: Optional[bool] = False,
1443
+ ) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]:
1444
+ """
1445
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1446
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1447
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1448
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1449
+
1450
+ Returns:
1451
+
1452
+ """
1453
+
1454
+ if labels is not None:
1455
+ labels = tf.where(
1456
+ labels == self.config.pad_token_id,
1457
+ tf.cast(tf.fill(shape_list(labels), -100), labels.dtype),
1458
+ labels,
1459
+ )
1460
+ use_cache = False
1461
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1462
+ decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)
1463
+
1464
+ outputs = self.model(
1465
+ input_ids,
1466
+ attention_mask=attention_mask,
1467
+ decoder_input_ids=decoder_input_ids,
1468
+ encoder_outputs=encoder_outputs,
1469
+ decoder_attention_mask=decoder_attention_mask,
1470
+ decoder_position_ids=decoder_position_ids,
1471
+ head_mask=head_mask,
1472
+ decoder_head_mask=decoder_head_mask,
1473
+ cross_attn_head_mask=cross_attn_head_mask,
1474
+ past_key_values=past_key_values,
1475
+ inputs_embeds=inputs_embeds,
1476
+ decoder_inputs_embeds=decoder_inputs_embeds,
1477
+ use_cache=use_cache,
1478
+ output_attentions=output_attentions,
1479
+ output_hidden_states=output_hidden_states,
1480
+ return_dict=return_dict,
1481
+ training=training,
1482
+ )
1483
+ lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)
1484
+ lm_logits = self.bias_layer(lm_logits)
1485
+ masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
1486
+
1487
+ if not return_dict:
1488
+ output = (lm_logits,) + outputs[1:]
1489
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1490
+ return TFSeq2SeqLMOutput(
1491
+ loss=masked_lm_loss,
1492
+ logits=lm_logits,
1493
+ past_key_values=outputs.past_key_values, # index 1 of decoder outputs
1494
+ decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of decoder outputs
1495
+ decoder_attentions=outputs.decoder_attentions, # index 3 of decoder outputs
1496
+ cross_attentions=outputs.cross_attentions, # index 4 of decoder outputs
1497
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs
1498
+ encoder_hidden_states=outputs.encoder_hidden_states, # index 1 of encoder outputs
1499
+ encoder_attentions=outputs.encoder_attentions, # index 2 of encoder outputs
1500
+ )
1501
+
1502
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output
1503
+ def serving_output(self, output):
1504
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
1505
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
1506
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
1507
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
1508
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
1509
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
1510
+
1511
+ return TFSeq2SeqLMOutput(
1512
+ logits=output.logits,
1513
+ past_key_values=pkv,
1514
+ decoder_hidden_states=dec_hs,
1515
+ decoder_attentions=dec_attns,
1516
+ cross_attentions=cross_attns,
1517
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
1518
+ encoder_hidden_states=enc_hs,
1519
+ encoder_attentions=enc_attns,
1520
+ )
1521
+
1522
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation
1523
+ def prepare_inputs_for_generation(
1524
+ self,
1525
+ decoder_input_ids,
1526
+ past_key_values=None,
1527
+ attention_mask=None,
1528
+ decoder_attention_mask=None,
1529
+ head_mask=None,
1530
+ decoder_head_mask=None,
1531
+ cross_attn_head_mask=None,
1532
+ use_cache=None,
1533
+ encoder_outputs=None,
1534
+ **kwargs,
1535
+ ):
1536
+ # cut decoder_input_ids if past_key_values is used
1537
+ if past_key_values is not None:
1538
+ decoder_input_ids = decoder_input_ids[:, -1:]
1539
+
1540
+ if decoder_attention_mask is not None: # xla
1541
+ decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:]
1542
+ elif past_key_values is not None: # no xla + past_key_values
1543
+ decoder_position_ids = past_key_values[0][0].shape[2]
1544
+ else: # no xla + no past_key_values
1545
+ decoder_position_ids = tf.range(decoder_input_ids.shape[1])
1546
+
1547
+ return {
1548
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
1549
+ "encoder_outputs": encoder_outputs,
1550
+ "past_key_values": past_key_values,
1551
+ "decoder_input_ids": decoder_input_ids,
1552
+ "attention_mask": attention_mask,
1553
+ "decoder_attention_mask": decoder_attention_mask,
1554
+ "decoder_position_ids": decoder_position_ids,
1555
+ "head_mask": head_mask,
1556
+ "decoder_head_mask": decoder_head_mask,
1557
+ "cross_attn_head_mask": cross_attn_head_mask,
1558
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
1559
+ }
1560
+
1561
+ def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):
1562
+ return shift_tokens_right(labels, self.config.pad_token_id)
1563
+
1564
+ def build(self, input_shape=None):
1565
+ if self.built:
1566
+ return
1567
+ self.built = True
1568
+ if getattr(self, "model", None) is not None:
1569
+ with tf.name_scope(self.model.name):
1570
+ self.model.build(None)
1571
+ if getattr(self, "bias_layer", None) is not None:
1572
+ with tf.name_scope(self.bias_layer.name):
1573
+ self.bias_layer.build(None)
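A minimal usage sketch of the TF classes added above (not part of the upstream file). It assumes the public `facebook/mbart-large-en-ro` checkpoint, an installed TensorFlow backend, and network access to download the weights:

```python
# Hedged sketch: English-to-Romanian translation with TFMBartForConditionalGeneration.
from transformers import MBartTokenizer, TFMBartForConditionalGeneration

tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX")
model = TFMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")

inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="tf")
generated = model.generate(
    **inputs,
    decoder_start_token_id=tokenizer.lang_code_to_id["ro_RO"],  # decoding starts from the target language code
    max_new_tokens=60,
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```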
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/tokenization_mbart.py ADDED
@@ -0,0 +1,337 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from shutil import copyfile
18
+ from typing import Any, Dict, List, Optional, Tuple
19
+
20
+ import sentencepiece as spm
21
+
22
+ from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ SPIECE_UNDERLINE = "▁"
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
31
+
32
+
33
+ FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] # fmt: skip
34
+
35
+
36
+ class MBartTokenizer(PreTrainedTokenizer):
37
+ """
38
+ Construct an MBART tokenizer.
39
+
40
+ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
41
+ [SentencePiece](https://github.com/google/sentencepiece).
42
+
43
+ The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
44
+ <tokens> <eos>` for target language documents.
45
+
46
+ Examples:
47
+
48
+ ```python
49
+ >>> from transformers import MBartTokenizer
50
+
51
+ >>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
52
+ >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
53
+ >>> expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria"
54
+ >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_romanian, return_tensors="pt")
55
+ ```"""
56
+
57
+ vocab_files_names = VOCAB_FILES_NAMES
58
+ model_input_names = ["input_ids", "attention_mask"]
59
+
60
+ prefix_tokens: List[int] = []
61
+ suffix_tokens: List[int] = []
62
+
63
+ def __init__(
64
+ self,
65
+ vocab_file,
66
+ bos_token="<s>",
67
+ eos_token="</s>",
68
+ sep_token="</s>",
69
+ cls_token="<s>",
70
+ unk_token="<unk>",
71
+ pad_token="<pad>",
72
+ mask_token="<mask>",
73
+ tokenizer_file=None,
74
+ src_lang=None,
75
+ tgt_lang=None,
76
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
77
+ additional_special_tokens=None,
78
+ **kwargs,
79
+ ):
80
+ # The mask token behaves like a normal word, i.e. it includes the space before it
81
+ mask_token = (
82
+ AddedToken(mask_token, lstrip=True, normalized=False) if isinstance(mask_token, str) else mask_token
83
+ )
84
+
85
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
86
+
87
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
88
+ self.sp_model.Load(str(vocab_file))
89
+ self.vocab_file = vocab_file
90
+
91
+ # Original fairseq vocab and spm vocab must be "aligned":
92
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
93
+ # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
94
+ # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
95
+ # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
96
+
97
+ # Mimic fairseq token-to-id alignment for the first 4 tokens
98
+ self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
99
+
100
+ # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
101
+ self.fairseq_offset = 1
102
+
103
+ self.sp_model_size = len(self.sp_model)
104
+ self.lang_code_to_id = {
105
+ code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
106
+ }
107
+ self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
108
+ self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
109
+
110
+ self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
111
+ self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
112
+ _additional_special_tokens = list(self.lang_code_to_id.keys())
113
+
114
+ if additional_special_tokens is not None:
115
+ # Only add those special tokens if they are not already there.
116
+ _additional_special_tokens.extend(
117
+ [t for t in additional_special_tokens if t not in _additional_special_tokens]
118
+ )
119
+
120
+ super().__init__(
121
+ bos_token=bos_token,
122
+ eos_token=eos_token,
123
+ unk_token=unk_token,
124
+ sep_token=sep_token,
125
+ cls_token=cls_token,
126
+ pad_token=pad_token,
127
+ mask_token=mask_token,
128
+ tokenizer_file=None,
129
+ src_lang=src_lang,
130
+ tgt_lang=tgt_lang,
131
+ additional_special_tokens=_additional_special_tokens,
132
+ sp_model_kwargs=self.sp_model_kwargs,
133
+ **kwargs,
134
+ )
135
+
136
+ self._src_lang = src_lang if src_lang is not None else "en_XX"
137
+ self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
138
+ self.tgt_lang = tgt_lang
139
+ self.set_src_lang_special_tokens(self._src_lang)
140
+
141
+ def __getstate__(self):
142
+ state = self.__dict__.copy()
143
+ state["sp_model"] = None
144
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
145
+ return state
146
+
147
+ def __setstate__(self, d):
148
+ self.__dict__ = d
149
+
150
+ # for backward compatibility
151
+ if not hasattr(self, "sp_model_kwargs"):
152
+ self.sp_model_kwargs = {}
153
+
154
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
155
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
156
+
157
+ @property
158
+ def vocab_size(self):
159
+ return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
160
+
161
+ @property
162
+ def src_lang(self) -> str:
163
+ return self._src_lang
164
+
165
+ @src_lang.setter
166
+ def src_lang(self, new_src_lang: str) -> None:
167
+ self._src_lang = new_src_lang
168
+ self.set_src_lang_special_tokens(self._src_lang)
169
+
170
+ def get_special_tokens_mask(
171
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
172
+ ) -> List[int]:
173
+ """
174
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
175
+ special tokens using the tokenizer `prepare_for_model` method.
176
+
177
+ Args:
178
+ token_ids_0 (`List[int]`):
179
+ List of IDs.
180
+ token_ids_1 (`List[int]`, *optional*):
181
+ Optional second list of IDs for sequence pairs.
182
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
183
+ Whether or not the token list is already formatted with special tokens for the model.
184
+
185
+ Returns:
186
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
187
+ """
188
+
189
+ if already_has_special_tokens:
190
+ return super().get_special_tokens_mask(
191
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
192
+ )
193
+
194
+ prefix_ones = [1] * len(self.prefix_tokens)
195
+ suffix_ones = [1] * len(self.suffix_tokens)
196
+ if token_ids_1 is None:
197
+ return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
198
+ return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
199
+
200
+ def build_inputs_with_special_tokens(
201
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
202
+ ) -> List[int]:
203
+ """
204
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
205
+ adding special tokens. An MBART sequence has the following format, where `X` represents the sequence:
206
+
207
+ - `input_ids` (for encoder) `X [eos, src_lang_code]`
208
+ - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`
209
+
210
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
211
+ separator.
212
+
213
+ Args:
214
+ token_ids_0 (`List[int]`):
215
+ List of IDs to which the special tokens will be added.
216
+ token_ids_1 (`List[int]`, *optional*):
217
+ Optional second list of IDs for sequence pairs.
218
+
219
+ Returns:
220
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
221
+ """
222
+ if token_ids_1 is None:
223
+ return self.prefix_tokens + token_ids_0 + self.suffix_tokens
224
+ # We don't expect to process pairs, but leave the pair logic for API consistency
225
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
226
+
227
+ def create_token_type_ids_from_sequences(
228
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
229
+ ) -> List[int]:
230
+ """
231
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. mBART does not
232
+ make use of token type ids, therefore a list of zeros is returned.
233
+
234
+ Args:
235
+ token_ids_0 (`List[int]`):
236
+ List of IDs.
237
+ token_ids_1 (`List[int]`, *optional*):
238
+ Optional second list of IDs for sequence pairs.
239
+
240
+ Returns:
241
+ `List[int]`: List of zeros.
242
+
243
+ """
244
+
245
+ sep = [self.sep_token_id]
246
+ cls = [self.cls_token_id]
247
+
248
+ if token_ids_1 is None:
249
+ return len(cls + token_ids_0 + sep) * [0]
250
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
251
+
252
+ def _build_translation_inputs(
253
+ self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
254
+ ):
255
+ """Used by the translation pipeline to prepare inputs for the generate function"""
256
+ if src_lang is None or tgt_lang is None:
257
+ raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
258
+ self.src_lang = src_lang
259
+ inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
260
+ tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
261
+ inputs["forced_bos_token_id"] = tgt_lang_id
262
+ return inputs
263
+
264
+ def get_vocab(self):
265
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
266
+ vocab.update(self.added_tokens_encoder)
267
+ return vocab
268
+
269
+ def _tokenize(self, text: str) -> List[str]:
270
+ return self.sp_model.encode(text, out_type=str)
271
+
272
+ def _convert_token_to_id(self, token):
273
+ """Converts a token (str) to an id using the vocab."""
274
+ if token in self.fairseq_tokens_to_ids:
275
+ return self.fairseq_tokens_to_ids[token]
276
+ spm_id = self.sp_model.PieceToId(token)
277
+
278
+ # Need to return unknown token if the SP model returned 0
279
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
280
+
281
+ def _convert_id_to_token(self, index):
282
+ """Converts an index (integer) to a token (str) using the vocab."""
283
+ if index in self.fairseq_ids_to_tokens:
284
+ return self.fairseq_ids_to_tokens[index]
285
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
286
+
287
+ def convert_tokens_to_string(self, tokens):
288
+ """Converts a sequence of tokens (strings for sub-words) into a single string."""
289
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
290
+ return out_string
291
+
292
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
293
+ if not os.path.isdir(save_directory):
294
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
295
+ return
296
+ out_vocab_file = os.path.join(
297
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
298
+ )
299
+
300
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
301
+ copyfile(self.vocab_file, out_vocab_file)
302
+ elif not os.path.isfile(self.vocab_file):
303
+ with open(out_vocab_file, "wb") as fi:
304
+ content_spiece_model = self.sp_model.serialized_model_proto()
305
+ fi.write(content_spiece_model)
306
+
307
+ return (out_vocab_file,)
308
+
309
+ def prepare_seq2seq_batch(
310
+ self,
311
+ src_texts: List[str],
312
+ src_lang: str = "en_XX",
313
+ tgt_texts: Optional[List[str]] = None,
314
+ tgt_lang: str = "ro_RO",
315
+ **kwargs,
316
+ ) -> BatchEncoding:
317
+ self.src_lang = src_lang
318
+ self.tgt_lang = tgt_lang
319
+ return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
320
+
321
+ def _switch_to_input_mode(self):
322
+ return self.set_src_lang_special_tokens(self.src_lang)
323
+
324
+ def _switch_to_target_mode(self):
325
+ return self.set_tgt_lang_special_tokens(self.tgt_lang)
326
+
327
+ def set_src_lang_special_tokens(self, src_lang) -> None:
328
+ """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
329
+ self.cur_lang_code = self.lang_code_to_id[src_lang]
330
+ self.prefix_tokens = []
331
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
332
+
333
+ def set_tgt_lang_special_tokens(self, lang: str) -> None:
334
+ """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
335
+ self.cur_lang_code = self.lang_code_to_id[lang]
336
+ self.prefix_tokens = []
337
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
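A small, self-contained sketch (not part of the upstream file) of the fairseq-offset logic used by `_convert_token_to_id` above; the toy piece-to-id table mirrors the alignment comment in `__init__`:

```python
# Toy reproduction of MBartTokenizer._convert_token_to_id: fairseq reserves ids 0-3 for
# <s>/<pad>/</s>/<unk>, so every ordinary SentencePiece id is shifted by fairseq_offset.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1  # spm id 3 (",") becomes fairseq id 4, and so on

def toy_convert_token_to_id(token, spm_piece_to_id):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id.get(token, 0)  # SentencePiece reports unknown pieces as id 0
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

spm_piece_to_id = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}
assert toy_convert_token_to_id(",", spm_piece_to_id) == 4            # shifted by the offset
assert toy_convert_token_to_id("not-a-piece", spm_piece_to_id) == 3  # falls back to <unk>
```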
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart/tokenization_mbart_fast.py ADDED
@@ -0,0 +1,270 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from shutil import copyfile
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import processors
21
+
22
+ from ...tokenization_utils import AddedToken, BatchEncoding
23
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
24
+ from ...utils import is_sentencepiece_available, logging
25
+
26
+
27
+ if is_sentencepiece_available():
28
+ from .tokenization_mbart import MBartTokenizer
29
+ else:
30
+ MBartTokenizer = None
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+
36
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
37
+
38
+
39
+ FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] # fmt: skip
40
+
41
+
42
+ class MBartTokenizerFast(PreTrainedTokenizerFast):
43
+ """
44
+ Construct a "fast" MBART tokenizer (backed by HuggingFace's *tokenizers* library). Based on
45
+ [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
46
+
47
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
48
+ refer to this superclass for more information regarding those methods.
49
+
50
+ The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
51
+ <tokens> <eos>` for target language documents.
52
+
53
+ Examples:
54
+
55
+ ```python
56
+ >>> from transformers import MBartTokenizerFast
57
+
58
+ >>> tokenizer = MBartTokenizerFast.from_pretrained(
59
+ ... "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
60
+ ... )
61
+ >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
62
+ >>> expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria"
63
+ >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_romanian, return_tensors="pt")
64
+ ```"""
65
+
66
+ vocab_files_names = VOCAB_FILES_NAMES
67
+ model_input_names = ["input_ids", "attention_mask"]
68
+ slow_tokenizer_class = MBartTokenizer
69
+
70
+ prefix_tokens: List[int] = []
71
+ suffix_tokens: List[int] = []
72
+
73
+ def __init__(
74
+ self,
75
+ vocab_file=None,
76
+ tokenizer_file=None,
77
+ bos_token="<s>",
78
+ eos_token="</s>",
79
+ sep_token="</s>",
80
+ cls_token="<s>",
81
+ unk_token="<unk>",
82
+ pad_token="<pad>",
83
+ mask_token="<mask>",
84
+ src_lang=None,
85
+ tgt_lang=None,
86
+ additional_special_tokens=None,
87
+ **kwargs,
88
+ ):
89
+ # The mask token behaves like a normal word, i.e. it includes the space before it
90
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
91
+
92
+ _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
93
+
94
+ if additional_special_tokens is not None:
95
+ # Only add those special tokens if they are not already there.
96
+ _additional_special_tokens.extend(
97
+ [t for t in additional_special_tokens if t not in _additional_special_tokens]
98
+ )
99
+
100
+ super().__init__(
101
+ vocab_file=vocab_file,
102
+ tokenizer_file=tokenizer_file,
103
+ bos_token=bos_token,
104
+ eos_token=eos_token,
105
+ sep_token=sep_token,
106
+ cls_token=cls_token,
107
+ unk_token=unk_token,
108
+ pad_token=pad_token,
109
+ mask_token=mask_token,
110
+ src_lang=src_lang,
111
+ tgt_lang=tgt_lang,
112
+ additional_special_tokens=_additional_special_tokens,
113
+ **kwargs,
114
+ )
115
+
116
+ self.vocab_file = vocab_file
117
+ self.lang_code_to_id = {
118
+ lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
119
+ }
120
+
121
+ self._src_lang = src_lang if src_lang is not None else "en_XX"
122
+ self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
123
+ self.tgt_lang = tgt_lang
124
+ self.set_src_lang_special_tokens(self._src_lang)
125
+
126
+ @property
127
+ def can_save_slow_tokenizer(self) -> bool:
128
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
129
+
130
+ @property
131
+ def src_lang(self) -> str:
132
+ return self._src_lang
133
+
134
+ @src_lang.setter
135
+ def src_lang(self, new_src_lang: str) -> None:
136
+ self._src_lang = new_src_lang
137
+ self.set_src_lang_special_tokens(self._src_lang)
138
+
139
+ def build_inputs_with_special_tokens(
140
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
141
+ ) -> List[int]:
142
+ """
143
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
144
+ adding special tokens. The special tokens depend on calling set_lang.
145
+
146
+ An MBART sequence has the following format, where `X` represents the sequence:
147
+
148
+ - `input_ids` (for encoder) `X [eos, src_lang_code]`
149
+ - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`
150
+
151
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
152
+ separator.
153
+
154
+ Args:
155
+ token_ids_0 (`List[int]`):
156
+ List of IDs to which the special tokens will be added.
157
+ token_ids_1 (`List[int]`, *optional*):
158
+ Optional second list of IDs for sequence pairs.
159
+
160
+ Returns:
161
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
162
+ """
163
+ if token_ids_1 is None:
164
+ return self.prefix_tokens + token_ids_0 + self.suffix_tokens
165
+ # We don't expect to process pairs, but leave the pair logic for API consistency
166
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
167
+
168
+ def create_token_type_ids_from_sequences(
169
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
170
+ ) -> List[int]:
171
+ """
172
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. mBART does not
173
+ make use of token type ids, therefore a list of zeros is returned.
174
+
175
+ Args:
176
+ token_ids_0 (`List[int]`):
177
+ List of IDs.
178
+ token_ids_1 (`List[int]`, *optional*):
179
+ Optional second list of IDs for sequence pairs.
180
+
181
+ Returns:
182
+ `List[int]`: List of zeros.
183
+
184
+ """
185
+
186
+ sep = [self.sep_token_id]
187
+ cls = [self.cls_token_id]
188
+
189
+ if token_ids_1 is None:
190
+ return len(cls + token_ids_0 + sep) * [0]
191
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
192
+
193
+ def _build_translation_inputs(
194
+ self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
195
+ ):
196
+ """Used by the translation pipeline to prepare inputs for the generate function"""
197
+ if src_lang is None or tgt_lang is None:
198
+ raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
199
+ self.src_lang = src_lang
200
+ inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
201
+ tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
202
+ inputs["forced_bos_token_id"] = tgt_lang_id
203
+ return inputs
204
+
205
+ def prepare_seq2seq_batch(
206
+ self,
207
+ src_texts: List[str],
208
+ src_lang: str = "en_XX",
209
+ tgt_texts: Optional[List[str]] = None,
210
+ tgt_lang: str = "ro_RO",
211
+ **kwargs,
212
+ ) -> BatchEncoding:
213
+ self.src_lang = src_lang
214
+ self.tgt_lang = tgt_lang
215
+ return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
216
+
217
+ def _switch_to_input_mode(self):
218
+ return self.set_src_lang_special_tokens(self.src_lang)
219
+
220
+ def _switch_to_target_mode(self):
221
+ return self.set_tgt_lang_special_tokens(self.tgt_lang)
222
+
223
+ def set_src_lang_special_tokens(self, src_lang) -> None:
224
+ """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
225
+ self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
226
+ self.prefix_tokens = []
227
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
228
+
229
+ prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
230
+ suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
231
+
232
+ self._tokenizer.post_processor = processors.TemplateProcessing(
233
+ single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
234
+ pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
235
+ special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
236
+ )
237
+
238
+ def set_tgt_lang_special_tokens(self, lang: str) -> None:
239
+ """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
240
+ self.cur_lang_code = self.convert_tokens_to_ids(lang)
241
+ self.prefix_tokens = []
242
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
243
+
244
+ prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
245
+ suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
246
+
247
+ self._tokenizer.post_processor = processors.TemplateProcessing(
248
+ single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
249
+ pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
250
+ special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
251
+ )
252
+
253
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
254
+ if not self.can_save_slow_tokenizer:
255
+ raise ValueError(
256
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
257
+ "tokenizer."
258
+ )
259
+
260
+ if not os.path.isdir(save_directory):
261
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
262
+ return
263
+ out_vocab_file = os.path.join(
264
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
265
+ )
266
+
267
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
268
+ copyfile(self.vocab_file, out_vocab_file)
269
+
270
+ return (out_vocab_file,)
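A toy illustration (not part of either upstream file) of the prefix/suffix convention both tokenizers implement: source and target sequences get no prefix and an `[eos, lang_code]` suffix, so `X` is encoded as `X </s> <lang_code>`. The ids below are placeholders, not the real mBART vocabulary:

```python
EOS_ID = 2
LANG_CODE_IDS = {"en_XX": 250004, "ro_RO": 250020}  # illustrative ids only

def build_inputs(token_ids, lang):
    prefix_tokens = []                             # mirrors set_*_lang_special_tokens above
    suffix_tokens = [EOS_ID, LANG_CODE_IDS[lang]]  # [eos, lang_code]
    return prefix_tokens + token_ids + suffix_tokens

assert build_inputs([10, 11, 12], "en_XX") == [10, 11, 12, 2, 250004]
assert build_inputs([20, 21], "ro_RO") == [20, 21, 2, 250020]
```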
llmeval-env/lib/python3.10/site-packages/transformers/models/megatron_bert/__init__.py ADDED
@@ -0,0 +1,69 @@
1
+ # Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
21
+ }
22
+
23
+ try:
24
+ if not is_torch_available():
25
+ raise OptionalDependencyNotAvailable()
26
+ except OptionalDependencyNotAvailable:
27
+ pass
28
+ else:
29
+ _import_structure["modeling_megatron_bert"] = [
30
+ "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
31
+ "MegatronBertForCausalLM",
32
+ "MegatronBertForMaskedLM",
33
+ "MegatronBertForMultipleChoice",
34
+ "MegatronBertForNextSentencePrediction",
35
+ "MegatronBertForPreTraining",
36
+ "MegatronBertForQuestionAnswering",
37
+ "MegatronBertForSequenceClassification",
38
+ "MegatronBertForTokenClassification",
39
+ "MegatronBertModel",
40
+ "MegatronBertPreTrainedModel",
41
+ ]
42
+
43
+ if TYPE_CHECKING:
44
+ from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
45
+
46
+ try:
47
+ if not is_torch_available():
48
+ raise OptionalDependencyNotAvailable()
49
+ except OptionalDependencyNotAvailable:
50
+ pass
51
+ else:
52
+ from .modeling_megatron_bert import (
53
+ MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
54
+ MegatronBertForCausalLM,
55
+ MegatronBertForMaskedLM,
56
+ MegatronBertForMultipleChoice,
57
+ MegatronBertForNextSentencePrediction,
58
+ MegatronBertForPreTraining,
59
+ MegatronBertForQuestionAnswering,
60
+ MegatronBertForSequenceClassification,
61
+ MegatronBertForTokenClassification,
62
+ MegatronBertModel,
63
+ MegatronBertPreTrainedModel,
64
+ )
65
+
66
+ else:
67
+ import sys
68
+
69
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
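A quick sketch (not part of the upstream file) of what the lazy-module wiring above provides; it assumes `transformers` is installed, and the modeling classes only resolve when torch is available:

```python
# The configuration class resolves through the _LazyModule registration above.
from transformers.models.megatron_bert import MegatronBertConfig

config = MegatronBertConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=256)
print(config.model_type)  # "megatron-bert"
```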
llmeval-env/lib/python3.10/site-packages/transformers/models/megatron_bert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.27 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/megatron_bert/__pycache__/configuration_megatron_bert.cpython-310.pyc ADDED
Binary file (5.88 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/megatron_bert/__pycache__/convert_megatron_bert_checkpoint.cpython-310.pyc ADDED
Binary file (5.84 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/megatron_bert/__pycache__/modeling_megatron_bert.cpython-310.pyc ADDED
Binary file (54.5 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/megatron_bert/configuration_megatron_bert.py ADDED
@@ -0,0 +1,129 @@
1
+ # coding=utf-8
2
+ # Copyright 2021- NVIDIA Corporation and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ MEGATRON_BERT model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class MegatronBertConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`MegatronBertModel`]. It is used to instantiate a
30
+ MEGATRON_BERT model according to the specified arguments, defining the model architecture. Instantiating a
31
+ configuration with the defaults will yield a similar configuration to that of the MEGATRON_BERT
32
+ [nvidia/megatron-bert-uncased-345m](https://huggingface.co/nvidia/megatron-bert-uncased-345m) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 29056):
40
+ Vocabulary size of the MEGATRON_BERT model. Defines the number of different tokens that can be represented
41
+ by the `inputs_ids` passed when calling [`MegatronBertModel`].
42
+ hidden_size (`int`, *optional*, defaults to 1024):
43
+ Dimensionality of the encoder layers and the pooler layer.
44
+ num_hidden_layers (`int`, *optional*, defaults to 24):
45
+ Number of hidden layers in the Transformer encoder.
46
+ num_attention_heads (`int`, *optional*, defaults to 16):
47
+ Number of attention heads for each attention layer in the Transformer encoder.
48
+ intermediate_size (`int`, *optional*, defaults to 4096):
49
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
50
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
51
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
52
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
53
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
54
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
55
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
56
+ The dropout ratio for the attention probabilities.
57
+ max_position_embeddings (`int`, *optional*, defaults to 512):
58
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
59
+ just in case (e.g., 512 or 1024 or 2048).
60
+ type_vocab_size (`int`, *optional*, defaults to 2):
61
+ The vocabulary size of the `token_type_ids` passed when calling [`MegatronBertModel`].
62
+ initializer_range (`float`, *optional*, defaults to 0.02):
63
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
64
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
65
+ The epsilon used by the layer normalization layers.
66
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
67
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
68
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
69
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
70
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
71
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
72
+ is_decoder (`bool`, *optional*, defaults to `False`):
73
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
74
+ use_cache (`bool`, *optional*, defaults to `True`):
75
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
76
+ relevant if `config.is_decoder=True`.
77
+
78
+ Examples:
79
+
80
+ ```python
81
+ >>> from transformers import MegatronBertConfig, MegatronBertModel
82
+
83
+ >>> # Initializing a MEGATRON_BERT google-bert/bert-base-uncased style configuration
84
+ >>> configuration = MegatronBertConfig()
85
+
86
+ >>> # Initializing a model (with random weights) from the google-bert/bert-base-uncased style configuration
87
+ >>> model = MegatronBertModel(configuration)
88
+
89
+ >>> # Accessing the model configuration
90
+ >>> configuration = model.config
91
+ ```"""
92
+
93
+ model_type = "megatron-bert"
94
+
95
+ def __init__(
96
+ self,
97
+ vocab_size=29056,
98
+ hidden_size=1024,
99
+ num_hidden_layers=24,
100
+ num_attention_heads=16,
101
+ intermediate_size=4096,
102
+ hidden_act="gelu",
103
+ hidden_dropout_prob=0.1,
104
+ attention_probs_dropout_prob=0.1,
105
+ max_position_embeddings=512,
106
+ type_vocab_size=2,
107
+ initializer_range=0.02,
108
+ layer_norm_eps=1e-12,
109
+ pad_token_id=0,
110
+ position_embedding_type="absolute",
111
+ use_cache=True,
112
+ **kwargs,
113
+ ):
114
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
115
+
116
+ self.vocab_size = vocab_size
117
+ self.hidden_size = hidden_size
118
+ self.num_hidden_layers = num_hidden_layers
119
+ self.num_attention_heads = num_attention_heads
120
+ self.hidden_act = hidden_act
121
+ self.intermediate_size = intermediate_size
122
+ self.hidden_dropout_prob = hidden_dropout_prob
123
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
124
+ self.max_position_embeddings = max_position_embeddings
125
+ self.type_vocab_size = type_vocab_size
126
+ self.initializer_range = initializer_range
127
+ self.layer_norm_eps = layer_norm_eps
128
+ self.position_embedding_type = position_embedding_type
129
+ self.use_cache = use_cache
llmeval-env/lib/python3.10/site-packages/transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py ADDED
@@ -0,0 +1,334 @@
1
+ ####################################################################################################
2
+
3
+ # Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ ####################################################################################################
18
+
19
+ #
20
+ # Note: If when running this conversion script you're getting an exception:
21
+ # ModuleNotFoundError: No module named 'megatron.model.enums'
22
+ # you need to tell python where to find the clone of Megatron-LM, e.g.:
23
+ #
24
+ # cd /tmp
25
+ # git clone https://github.com/NVIDIA/Megatron-LM
26
+ # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py ...
27
+ #
28
+ # if you already have it cloned elsewhere, simply adjust the path to the existing path
29
+ #
30
+ # If the training was done using a Megatron-LM fork, e.g.,
31
+ # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
32
+ # in your path, i.e., /path/to/Megatron-DeepSpeed/
33
+ #
34
+
35
+ import argparse
36
+ import os
37
+ import re
38
+ import zipfile
39
+
40
+ import torch
41
+
42
+ from transformers import MegatronBertConfig
43
+
44
+
45
+ ####################################################################################################
46
+
47
+
48
+ def recursive_print(name, val, spaces=0):
49
+ # Format the message.
50
+ if name is None:
51
+ msg = None
52
+ else:
53
+ fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
54
+ msg = fmt.format(name)
55
+
56
+ # Print and recurse (if needed).
57
+ if isinstance(val, dict):
58
+ if msg is not None:
59
+ print(msg)
60
+ for k in val.keys():
61
+ recursive_print(k, val[k], spaces + 2)
62
+ elif isinstance(val, torch.Tensor):
63
+ print(msg, ":", val.size())
64
+ else:
65
+ print(msg, ":", val)
66
+
67
+
68
+ def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
69
+ # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
70
+ # for compatibility with later versions of NVIDIA Megatron-LM.
71
+ # The inverse operation is performed inside Megatron-LM to read checkpoints:
72
+ # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
73
+ # If param is the weight tensor of the self-attention block, the returned tensor
74
+ # will have to be transposed one more time to be read by HuggingFace BERT.
75
+ input_shape = param.size()
76
+ if checkpoint_version == 1.0:
77
+ # version 1.0 stores [num_heads * hidden_size * num_splits, :]
78
+ saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
79
+ param = param.view(*saved_shape)
80
+ param = param.transpose(0, 2)
81
+ param = param.transpose(1, 2).contiguous()
82
+ elif checkpoint_version >= 2.0:
83
+ # other versions store [num_heads * num_splits * hidden_size, :]
84
+ saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
85
+ param = param.view(*saved_shape)
86
+ param = param.transpose(0, 1).contiguous()
87
+ param = param.view(*input_shape)
88
+ return param
89
+
90
+
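To make the permutation above concrete, here is a small numeric sketch (not part of the upstream script) of the `checkpoint_version >= 2.0` branch of `fix_query_key_value_ordering`, which regroups rows stored as `[head, split, hidden]` into `[split, head, hidden]` without changing the overall shape:

```python
# Standalone demo with toy sizes; mirrors the >= 2.0 branch above.
import torch

num_heads, num_splits, hidden_size, cols = 2, 3, 4, 5
param = torch.arange(num_heads * num_splits * hidden_size * cols).reshape(
    num_heads * num_splits * hidden_size, cols
)

reordered = (
    param.view(num_heads, num_splits, hidden_size, cols)  # [head, split, hidden, :]
    .transpose(0, 1)                                      # [split, head, hidden, :]
    .contiguous()
    .view(num_heads * num_splits * hidden_size, cols)     # back to a 2-D weight matrix
)
assert reordered.shape == param.shape  # only the row grouping changes
```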
91
+ ####################################################################################################
92
+
93
+
94
+ def convert_megatron_checkpoint(args, input_state_dict, config):
95
+ # The converted output model.
96
+ output_state_dict = {}
97
+
98
+ # old versions did not store training args
99
+ ds_args = input_state_dict.get("args", None)
100
+ if ds_args is not None:
101
+ # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
102
+ # from pprint import pprint
103
+ # pprint(vars(ds_args))
104
+
105
+ config.tokenizer_type = ds_args.tokenizer_type
106
+ config.vocab_size = ds_args.padded_vocab_size
107
+ config.max_position_embeddings = ds_args.max_position_embeddings
108
+ config.hidden_size = ds_args.hidden_size
109
+ config.num_hidden_layers = ds_args.num_layers
110
+ config.num_attention_heads = ds_args.num_attention_heads
111
+ config.intermediate_size = ds_args.ffn_hidden_size if "ffn_hidden_size" in ds_args else 4 * ds_args.hidden_size
112
+ # pprint(config)
113
+
114
+ # The number of heads.
115
+ heads = config.num_attention_heads
116
+ # The hidden_size per head.
117
+ hidden_size_per_head = config.hidden_size // heads
118
+ # Megatron-LM checkpoint version
119
+ if "checkpoint_version" in input_state_dict.keys():
120
+ checkpoint_version = input_state_dict["checkpoint_version"]
121
+ else:
122
+ checkpoint_version = 0.0
123
+
124
+ # The model.
125
+ model = input_state_dict["model"]
126
+ # The language model.
127
+ lm = model["language_model"]
128
+ # The embeddings.
129
+ embeddings = lm["embedding"]
130
+
131
+ # The word embeddings.
132
+ word_embeddings = embeddings["word_embeddings"]["weight"]
133
+ # Truncate the embedding table to vocab_size rows.
134
+ word_embeddings = word_embeddings[: config.vocab_size, :]
135
+ # Store the word embeddings.
136
+ output_state_dict["bert.embeddings.word_embeddings.weight"] = word_embeddings
137
+
138
+ # The position embeddings.
139
+ pos_embeddings = embeddings["position_embeddings"]["weight"]
140
+ assert pos_embeddings.size(0) == config.max_position_embeddings and pos_embeddings.size(1) == config.hidden_size
141
+ # Store the position embeddings.
142
+ output_state_dict["bert.embeddings.position_embeddings.weight"] = pos_embeddings
143
+
144
+ # The token-type embeddings.
145
+ tokentype_embeddings = embeddings["tokentype_embeddings"]["weight"]
146
+ # Store the token-type embeddings.
147
+ output_state_dict["bert.embeddings.token_type_embeddings.weight"] = tokentype_embeddings
148
+
149
+ # The transformer.
150
+ transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
151
+
152
+ # The regex to extract layer names.
153
+ layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
154
+
155
+ # The simple map of names for "automated" rules.
156
+ megatron_to_transformers = {
157
+ "attention.dense": ".attention.output.dense.",
158
+ "self_attention.dense": ".attention.output.dense.",
159
+ "mlp.dense_h_to_4h": ".intermediate.dense.",
160
+ "mlp.dense_4h_to_h": ".output.dense.",
161
+ }
162
+
163
+ # Keep track of the attention/query/value tensor.
164
+ attention_qkv_weight = None
165
+
166
+ # Extract the layers.
167
+ for key, val in transformer.items():
168
+ # Match the name.
169
+ m = layer_re.match(key)
170
+
171
+ # Stop if that's not a layer
172
+ if m is None:
173
+ break
174
+
175
+ # The index of the layer.
176
+ layer_idx = int(m.group(1))
177
+ # The name of the operation.
178
+ op_name = m.group(2)
179
+ # Is it a weight or a bias?
180
+ weight_or_bias = m.group(3)
181
+
182
+ # The name of the layer.
183
+ layer_name = f"bert.encoder.layer.{layer_idx}"
184
+
185
+ # For layernorm(s), simply store the layer norm.
186
+ if op_name.endswith("layernorm"):
187
+ ln_name = "attention.ln" if op_name.startswith("input") else "ln"
188
+ output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
189
+
190
+ # Transpose the QKV matrix.
191
+ elif (
192
+ op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
193
+ ) and weight_or_bias == "weight":
194
+ # Make sure the QKV pointer is nil.
195
+ assert attention_qkv_weight is None, ""
196
+
197
+ out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
198
+ # Store the tensor as we need the bias as well to interleave QKV and biases.
199
+ attention_qkv_weight = out_val
200
+
201
+ # Transpose the bias.
202
+ elif (
203
+ op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
204
+ ) and weight_or_bias == "bias":
205
+ # Make sure we read the weight tensor.
206
+ assert attention_qkv_weight is not None, ""
207
+
208
+ # Split the QKV matrix into Q, K and V. Megatron stores Q,K,V interleaved.
209
+ q = attention_qkv_weight[0 * config.hidden_size : 1 * config.hidden_size, :]
210
+ k = attention_qkv_weight[1 * config.hidden_size : 2 * config.hidden_size, :]
211
+ v = attention_qkv_weight[2 * config.hidden_size : 3 * config.hidden_size, :]
212
+
213
+ out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
214
+ # Split the bias.
215
+ q_bias = out_val[0 * config.hidden_size : 1 * config.hidden_size]
216
+ k_bias = out_val[1 * config.hidden_size : 2 * config.hidden_size]
217
+ v_bias = out_val[2 * config.hidden_size : 3 * config.hidden_size]
218
+
219
+ # Store.
220
+ output_state_dict[f"{layer_name}.attention.self.query.weight"] = q
221
+ output_state_dict[f"{layer_name}.attention.self.query.bias"] = q_bias
222
+ output_state_dict[f"{layer_name}.attention.self.key.weight"] = k
223
+ output_state_dict[f"{layer_name}.attention.self.key.bias"] = k_bias
224
+ output_state_dict[f"{layer_name}.attention.self.value.weight"] = v
225
+ output_state_dict[f"{layer_name}.attention.self.value.bias"] = v_bias
226
+
227
+ # Clear the stored tensor.
228
+ attention_qkv_weight = None
229
+
230
+ # Copy weights and biases as is.
231
+ elif weight_or_bias in ["weight", "bias"]:
232
+ out_name = megatron_to_transformers[op_name]
233
+ output_state_dict[layer_name + out_name + weight_or_bias] = val
234
+
235
+ # The final layernorm.
236
+ output_state_dict["bert.encoder.ln.weight"] = transformer["final_layernorm.weight"]
237
+ output_state_dict["bert.encoder.ln.bias"] = transformer["final_layernorm.bias"]
238
+
239
+ # The pooler.
240
+ pooler = lm["pooler"]
241
+
242
+ # Store the matrix and the bias.
243
+ output_state_dict["bert.pooler.dense.weight"] = pooler["dense.weight"]
244
+ output_state_dict["bert.pooler.dense.bias"] = pooler["dense.bias"]
245
+
246
+ # The LM head from Megatron (for RACE).
247
+ lm_head = model["lm_head"]
248
+
249
+ # The transform matrix.
250
+ output_state_dict["cls.predictions.transform.dense.weight"] = lm_head["dense.weight"]
251
+ output_state_dict["cls.predictions.transform.dense.bias"] = lm_head["dense.bias"]
252
+
253
+ # The transform LN.
254
+ output_state_dict["cls.predictions.transform.LayerNorm.weight"] = lm_head["layernorm.weight"]
255
+ output_state_dict["cls.predictions.transform.LayerNorm.bias"] = lm_head["layernorm.bias"]
256
+
257
+ # For the decoder, we replicate the weights.
258
+ output_state_dict["cls.predictions.decoder.weight"] = word_embeddings
259
+ output_state_dict["cls.predictions.bias"] = lm_head["bias"]
260
+
261
+ # The classifier from Megatron (for MNLI).
262
+ binary_head = model["binary_head"]
263
+
264
+ # Store the classifier.
265
+ output_state_dict["cls.seq_relationship.weight"] = binary_head["weight"]
266
+ output_state_dict["cls.seq_relationship.bias"] = binary_head["bias"]
267
+
268
+ # It should be done!
269
+ return output_state_dict
270
+
271
+
272
+ ####################################################################################################
273
+
274
+
275
+ def main():
276
+ # Create the argument parser.
277
+ parser = argparse.ArgumentParser()
278
+ parser.add_argument("--print-checkpoint-structure", action="store_true")
279
+ parser.add_argument("path_to_checkpoint", type=str, help="Path to the ZIP file containing the checkpoint")
280
+ parser.add_argument(
281
+ "--config_file",
282
+ default="",
283
+ type=str,
284
+ help="An optional config json file describing the pre-trained model.",
285
+ )
286
+ args = parser.parse_args()
287
+
288
+ # Extract the directory that contains the checkpoint (used as the output folder).
289
+ basename = os.path.dirname(args.path_to_checkpoint)
290
+
291
+ # Load the model.
292
+ # the .zip extension is optional; keep supporting it for backward compatibility
293
+ print(f'Extracting PyTorch state dictionary from "{args.path_to_checkpoint}"')
294
+ if args.path_to_checkpoint.endswith(".zip"):
295
+ with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
296
+ with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
297
+ input_state_dict = torch.load(pytorch_dict, map_location="cpu")
298
+ else:
299
+ input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")
300
+
301
+ if args.config_file == "":
302
+ # Default config of megatron-bert 345m
303
+ config = MegatronBertConfig()
304
+
305
+ # different megatron-bert-*-345m models have different vocab sizes, so override the default
306
+ # config (which is for megatron-bert-cased-345m) with the actual vocab dimension
307
+ config.vocab_size = input_state_dict["model"]["lm_head"]["bias"].numel()
308
+ else:
309
+ config = MegatronBertConfig.from_json_file(args.config_file)
310
+
311
+ # Convert.
312
+ print("Converting")
313
+ output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
314
+
315
+ # Print the structure of converted state dict.
316
+ if args.print_checkpoint_structure:
317
+ recursive_print(None, output_state_dict)
318
+
319
+ # Store the config to file.
320
+ print("Saving config")
321
+ config.save_pretrained(basename)
322
+
323
+ # Store the state_dict to file.
324
+ output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
325
+ print(f'Saving checkpoint to "{output_checkpoint_file}"')
326
+ torch.save(output_state_dict, output_checkpoint_file)
327
+
328
+
329
+ ####################################################################################################
330
+
331
+ if __name__ == "__main__":
332
+ main()
333
+
334
+ ####################################################################################################
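A hedged usage sketch of the script above (the checkpoint path and folder name are illustrative assumptions; `MegatronBertModel.from_pretrained` is the standard Transformers API): the converter writes `config.json` and `pytorch_model.bin` next to the input checkpoint, so that folder can then be loaded directly.

    # Sketch only -- "checkpoints/megatron_bert_345m.zip" is a hypothetical path.
    #   python convert_megatron_bert_checkpoint.py --print-checkpoint-structure \
    #       checkpoints/megatron_bert_345m.zip
    # The converter saves config.json and pytorch_model.bin into "checkpoints/",
    # which from_pretrained can then consume:
    from transformers import MegatronBertModel

    model = MegatronBertModel.from_pretrained("checkpoints")
    model.eval()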
llmeval-env/lib/python3.10/site-packages/transformers/models/megatron_bert/modeling_megatron_bert.py ADDED
@@ -0,0 +1,1836 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ PyTorch MegatronBERT model."""
17
+
18
+
19
+ import math
20
+ import os
21
+ import warnings
22
+ from dataclasses import dataclass
23
+ from typing import Optional, Tuple, Union
24
+
25
+ import torch
26
+ import torch.utils.checkpoint
27
+ from torch import nn
28
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
29
+
30
+ from ...activations import ACT2FN
31
+ from ...modeling_outputs import (
32
+ BaseModelOutputWithPastAndCrossAttentions,
33
+ BaseModelOutputWithPoolingAndCrossAttentions,
34
+ CausalLMOutputWithCrossAttentions,
35
+ MaskedLMOutput,
36
+ MultipleChoiceModelOutput,
37
+ NextSentencePredictorOutput,
38
+ QuestionAnsweringModelOutput,
39
+ SequenceClassifierOutput,
40
+ TokenClassifierOutput,
41
+ )
42
+ from ...modeling_utils import PreTrainedModel
43
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
44
+ from ...utils import (
45
+ ModelOutput,
46
+ add_code_sample_docstrings,
47
+ add_start_docstrings,
48
+ add_start_docstrings_to_model_forward,
49
+ logging,
50
+ replace_return_docstrings,
51
+ )
52
+ from .configuration_megatron_bert import MegatronBertConfig
53
+
54
+
55
+ logger = logging.get_logger(__name__)
56
+
57
+ _CONFIG_FOR_DOC = "MegatronBertConfig"
58
+ _CHECKPOINT_FOR_DOC = "nvidia/megatron-bert-cased-345m"
59
+
60
+
61
+ from ..deprecated._archive_maps import MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
62
+
63
+
64
+ def load_tf_weights_in_megatron_bert(model, config, tf_checkpoint_path):
65
+ """Load tf checkpoints in a pytorch model."""
66
+ try:
67
+ import re
68
+
69
+ import numpy as np
70
+ import tensorflow as tf
71
+ except ImportError:
72
+ logger.error(
73
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
74
+ "https://www.tensorflow.org/install/ for installation instructions."
75
+ )
76
+ raise
77
+ tf_path = os.path.abspath(tf_checkpoint_path)
78
+ logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
79
+ # Load weights from TF model
80
+ init_vars = tf.train.list_variables(tf_path)
81
+ names = []
82
+ arrays = []
83
+ for name, shape in init_vars:
84
+ logger.info(f"Loading TF weight {name} with shape {shape}")
85
+ array = tf.train.load_variable(tf_path, name)
86
+ names.append(name)
87
+ arrays.append(array)
88
+
89
+ for name, array in zip(names, arrays):
90
+ name = name.split("/")
91
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
92
+ # which are not required for using the pretrained model
93
+ if any(
94
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
95
+ for n in name
96
+ ):
97
+ logger.info(f"Skipping {'/'.join(name)}")
98
+ continue
99
+ pointer = model
100
+ for m_name in name:
101
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
102
+ scope_names = re.split(r"_(\d+)", m_name)
103
+ else:
104
+ scope_names = [m_name]
105
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
106
+ pointer = getattr(pointer, "weight")
107
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
108
+ pointer = getattr(pointer, "bias")
109
+ elif scope_names[0] == "output_weights":
110
+ pointer = getattr(pointer, "weight")
111
+ elif scope_names[0] == "squad":
112
+ pointer = getattr(pointer, "classifier")
113
+ else:
114
+ try:
115
+ pointer = getattr(pointer, scope_names[0])
116
+ except AttributeError:
117
+ logger.info(f"Skipping {'/'.join(name)}")
118
+ continue
119
+ if len(scope_names) >= 2:
120
+ num = int(scope_names[1])
121
+ pointer = pointer[num]
122
+ if m_name[-11:] == "_embeddings":
123
+ pointer = getattr(pointer, "weight")
124
+ elif m_name == "kernel":
125
+ array = np.transpose(array)
126
+ if pointer.shape != array.shape:
127
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
128
+ logger.info("Initialize PyTorch weight {}".format(name))
129
+ pointer.data = torch.from_numpy(array)
130
+ return model
131
+
132
+
133
+ class MegatronBertEmbeddings(nn.Module):
134
+ """Construct the embeddings from word, position and token_type embeddings."""
135
+
136
+ def __init__(self, config):
137
+ super().__init__()
138
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
139
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
140
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
141
+
142
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
143
+ # any TensorFlow checkpoint file
144
+
145
+ # In Megatron, layer-norm is applied after the 1st dropout.
146
+ # self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
147
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
148
+
149
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
150
+ self.register_buffer(
151
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
152
+ )
153
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
154
+
155
+ def forward(
156
+ self,
157
+ input_ids: Optional[torch.LongTensor] = None,
158
+ token_type_ids: Optional[torch.LongTensor] = None,
159
+ position_ids: Optional[torch.LongTensor] = None,
160
+ inputs_embeds: Optional[torch.LongTensor] = None,
161
+ past_key_values_length: int = 0,
162
+ ) -> torch.Tensor:
163
+ if input_ids is not None:
164
+ input_shape = input_ids.size()
165
+ else:
166
+ input_shape = inputs_embeds.size()[:-1]
167
+
168
+ seq_length = input_shape[1]
169
+
170
+ if position_ids is None:
171
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
172
+
173
+ if token_type_ids is None:
174
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
175
+
176
+ if inputs_embeds is None:
177
+ inputs_embeds = self.word_embeddings(input_ids)
178
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
179
+
180
+ embeddings = inputs_embeds + token_type_embeddings
181
+ if self.position_embedding_type == "absolute":
182
+ position_embeddings = self.position_embeddings(position_ids)
183
+ embeddings += position_embeddings
184
+
185
+ # Megatron BERT moves that layer norm after the drop-out (and to each layer).
186
+ # embeddings = self.LayerNorm(embeddings)
187
+ embeddings = self.dropout(embeddings)
188
+ return embeddings
189
+
190
+
191
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->MegatronBert
192
+ class MegatronBertSelfAttention(nn.Module):
193
+ def __init__(self, config, position_embedding_type=None):
194
+ super().__init__()
195
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
196
+ raise ValueError(
197
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
198
+ f"heads ({config.num_attention_heads})"
199
+ )
200
+
201
+ self.num_attention_heads = config.num_attention_heads
202
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
203
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
204
+
205
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
206
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
207
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
208
+
209
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
210
+ self.position_embedding_type = position_embedding_type or getattr(
211
+ config, "position_embedding_type", "absolute"
212
+ )
213
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
214
+ self.max_position_embeddings = config.max_position_embeddings
215
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
216
+
217
+ self.is_decoder = config.is_decoder
218
+
219
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
220
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
221
+ x = x.view(new_x_shape)
222
+ return x.permute(0, 2, 1, 3)
223
+
224
+ def forward(
225
+ self,
226
+ hidden_states: torch.Tensor,
227
+ attention_mask: Optional[torch.FloatTensor] = None,
228
+ head_mask: Optional[torch.FloatTensor] = None,
229
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
230
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
231
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
232
+ output_attentions: Optional[bool] = False,
233
+ ) -> Tuple[torch.Tensor]:
234
+ mixed_query_layer = self.query(hidden_states)
235
+
236
+ # If this is instantiated as a cross-attention module, the keys
237
+ # and values come from an encoder; the attention mask needs to be
238
+ # such that the encoder's padding tokens are not attended to.
239
+ is_cross_attention = encoder_hidden_states is not None
240
+
241
+ if is_cross_attention and past_key_value is not None:
242
+ # reuse k,v, cross_attentions
243
+ key_layer = past_key_value[0]
244
+ value_layer = past_key_value[1]
245
+ attention_mask = encoder_attention_mask
246
+ elif is_cross_attention:
247
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
248
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
249
+ attention_mask = encoder_attention_mask
250
+ elif past_key_value is not None:
251
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
252
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
253
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
254
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
255
+ else:
256
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
257
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
258
+
259
+ query_layer = self.transpose_for_scores(mixed_query_layer)
260
+
261
+ use_cache = past_key_value is not None
262
+ if self.is_decoder:
263
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
264
+ # Further calls to cross_attention layer can then reuse all cross-attention
265
+ # key/value_states (first "if" case)
266
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
267
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
268
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
269
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
270
+ past_key_value = (key_layer, value_layer)
271
+
272
+ # Take the dot product between "query" and "key" to get the raw attention scores.
273
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
274
+
275
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
276
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
277
+ if use_cache:
278
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
279
+ -1, 1
280
+ )
281
+ else:
282
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
283
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
284
+ distance = position_ids_l - position_ids_r
285
+
286
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
287
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
288
+
289
+ if self.position_embedding_type == "relative_key":
290
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
291
+ attention_scores = attention_scores + relative_position_scores
292
+ elif self.position_embedding_type == "relative_key_query":
293
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
294
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
295
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
296
+
297
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
298
+ if attention_mask is not None:
299
+ # Apply the attention mask is (precomputed for all layers in MegatronBertModel forward() function)
300
+ attention_scores = attention_scores + attention_mask
301
+
302
+ # Normalize the attention scores to probabilities.
303
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
304
+
305
+ # This is actually dropping out entire tokens to attend to, which might
306
+ # seem a bit unusual, but is taken from the original Transformer paper.
307
+ attention_probs = self.dropout(attention_probs)
308
+
309
+ # Mask heads if we want to
310
+ if head_mask is not None:
311
+ attention_probs = attention_probs * head_mask
312
+
313
+ context_layer = torch.matmul(attention_probs, value_layer)
314
+
315
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
316
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
317
+ context_layer = context_layer.view(new_context_layer_shape)
318
+
319
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
320
+
321
+ if self.is_decoder:
322
+ outputs = outputs + (past_key_value,)
323
+ return outputs
324
+
325
+
326
+ # Based on transformers.models.bert.modeling_bert.BertSelfOutput. Moved LayerNorm to MegatronBertAttention below.
327
+ class MegatronBertSelfOutput(nn.Module):
328
+ def __init__(self, config):
329
+ super().__init__()
330
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
331
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
332
+
333
+ def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
334
+ hidden_states = self.dense(hidden_states)
335
+ hidden_states = self.dropout(hidden_states)
336
+ return residual + hidden_states
337
+
338
+
339
+ # Based on transformers.models.bert.modeling_bert.BertAttention. Added LayerNorm.
340
+ class MegatronBertAttention(nn.Module):
341
+ def __init__(self, config):
342
+ super().__init__()
343
+ self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
344
+ self.self = MegatronBertSelfAttention(config)
345
+ self.output = MegatronBertSelfOutput(config)
346
+ self.pruned_heads = set()
347
+
348
+ def prune_heads(self, heads):
349
+ if len(heads) == 0:
350
+ return
351
+ heads, index = find_pruneable_heads_and_indices(
352
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
353
+ )
354
+
355
+ # Prune linear layers
356
+ self.self.query = prune_linear_layer(self.self.query, index)
357
+ self.self.key = prune_linear_layer(self.self.key, index)
358
+ self.self.value = prune_linear_layer(self.self.value, index)
359
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
360
+
361
+ # Update hyper params and store pruned heads
362
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
363
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
364
+ self.pruned_heads = self.pruned_heads.union(heads)
365
+
366
+ def forward(
367
+ self,
368
+ hidden_states: torch.Tensor,
369
+ attention_mask: Optional[torch.FloatTensor] = None,
370
+ head_mask: Optional[torch.FloatTensor] = None,
371
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
372
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
373
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
374
+ output_attentions: Optional[bool] = False,
375
+ ) -> Tuple[torch.Tensor]:
376
+ ln_outputs = self.ln(hidden_states)
377
+ self_outputs = self.self(
378
+ ln_outputs,
379
+ attention_mask,
380
+ head_mask,
381
+ encoder_hidden_states,
382
+ encoder_attention_mask,
383
+ past_key_value,
384
+ output_attentions,
385
+ )
386
+ attention_output = self.output(self_outputs[0], hidden_states)
387
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
388
+ return outputs
389
+
390
+
391
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->MegatronBert
392
+ class MegatronBertIntermediate(nn.Module):
393
+ def __init__(self, config):
394
+ super().__init__()
395
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
396
+ if isinstance(config.hidden_act, str):
397
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
398
+ else:
399
+ self.intermediate_act_fn = config.hidden_act
400
+
401
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
402
+ hidden_states = self.dense(hidden_states)
403
+ hidden_states = self.intermediate_act_fn(hidden_states)
404
+ return hidden_states
405
+
406
+
407
+ # Based on transformers.models.bert.modeling_bert.BertOutput. Moved LayerNorm to MegatronBertLayer below.
408
+ class MegatronBertOutput(nn.Module):
409
+ def __init__(self, config):
410
+ super().__init__()
411
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
412
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
413
+
414
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
415
+ hidden_states = self.dense(hidden_states)
416
+ hidden_states = self.dropout(hidden_states)
417
+ return input_tensor + hidden_states
418
+
419
+
420
+ # Based on transformers.models.bert.modeling_bert.BertLayer. Added LayerNorm.
421
+ class MegatronBertLayer(nn.Module):
422
+ def __init__(self, config):
423
+ super().__init__()
424
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
425
+ self.seq_len_dim = 1
426
+ self.attention = MegatronBertAttention(config)
427
+ self.is_decoder = config.is_decoder
428
+ self.add_cross_attention = config.add_cross_attention
429
+ if self.add_cross_attention:
430
+ if not self.is_decoder:
431
+ raise TypeError(f"{self} should be used as a decoder model if cross attention is added")
432
+ self.crossattention = MegatronBertAttention(config)
433
+ self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
434
+ self.intermediate = MegatronBertIntermediate(config)
435
+ self.output = MegatronBertOutput(config)
436
+
437
+ def forward(
438
+ self,
439
+ hidden_states: torch.Tensor,
440
+ attention_mask: Optional[torch.FloatTensor] = None,
441
+ head_mask: Optional[torch.FloatTensor] = None,
442
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
443
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
444
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
445
+ output_attentions: Optional[bool] = False,
446
+ ) -> Tuple[torch.Tensor]:
447
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
448
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
449
+ self_attention_outputs = self.attention(
450
+ hidden_states,
451
+ attention_mask,
452
+ head_mask,
453
+ output_attentions=output_attentions,
454
+ past_key_value=self_attn_past_key_value,
455
+ )
456
+ attention_output = self_attention_outputs[0]
457
+
458
+ # if decoder, the last output is tuple of self-attn cache
459
+ if self.is_decoder:
460
+ outputs = self_attention_outputs[1:-1]
461
+ present_key_value = self_attention_outputs[-1]
462
+ else:
463
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
464
+
465
+ cross_attn_present_key_value = None
466
+ if self.is_decoder and encoder_hidden_states is not None:
467
+ if not hasattr(self, "crossattention"):
468
+ raise AttributeError(
469
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
470
+ " by setting `config.add_cross_attention=True`"
471
+ )
472
+
473
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
474
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
475
+ cross_attention_outputs = self.crossattention(
476
+ attention_output,
477
+ attention_mask,
478
+ head_mask,
479
+ encoder_hidden_states,
480
+ encoder_attention_mask,
481
+ cross_attn_past_key_value,
482
+ output_attentions,
483
+ )
484
+ attention_output = cross_attention_outputs[0]
485
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
486
+
487
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
488
+ cross_attn_present_key_value = cross_attention_outputs[-1]
489
+ present_key_value = present_key_value + cross_attn_present_key_value
490
+
491
+ layer_output = apply_chunking_to_forward(
492
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
493
+ )
494
+ outputs = (layer_output,) + outputs
495
+
496
+ # if decoder, return the attn key/values as the last output
497
+ if self.is_decoder:
498
+ outputs = outputs + (present_key_value,)
499
+
500
+ return outputs
501
+
502
+ def feed_forward_chunk(self, attention_output):
503
+ ln_output = self.ln(attention_output)
504
+ intermediate_output = self.intermediate(ln_output)
505
+ layer_output = self.output(intermediate_output, attention_output)
506
+ return layer_output
507
+
508
+
509
+ class MegatronBertEncoder(nn.Module):
510
+ def __init__(self, config):
511
+ super().__init__()
512
+ self.config = config
513
+ self.layer = nn.ModuleList([MegatronBertLayer(config) for _ in range(config.num_hidden_layers)])
514
+
515
+ # The final layer norm. We removed the 1st LN, moved LN to each hidden layer and this one
516
+ # is simply the final LN (Transformers' BERT has it attached to each hidden layer).
517
+ self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
518
+ self.gradient_checkpointing = False
519
+
520
+ def forward(
521
+ self,
522
+ hidden_states: torch.Tensor,
523
+ attention_mask: Optional[torch.FloatTensor] = None,
524
+ head_mask: Optional[torch.FloatTensor] = None,
525
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
526
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
527
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
528
+ use_cache: Optional[bool] = None,
529
+ output_attentions: Optional[bool] = False,
530
+ output_hidden_states: Optional[bool] = False,
531
+ return_dict: Optional[bool] = True,
532
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
533
+ if self.gradient_checkpointing and self.training:
534
+ if use_cache:
535
+ logger.warning_once(
536
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
537
+ )
538
+ use_cache = False
539
+ all_hidden_states = () if output_hidden_states else None
540
+ all_self_attentions = () if output_attentions else None
541
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
542
+
543
+ next_decoder_cache = () if use_cache else None
544
+ for i, layer_module in enumerate(self.layer):
545
+ if output_hidden_states:
546
+ all_hidden_states = all_hidden_states + (hidden_states,)
547
+
548
+ layer_head_mask = head_mask[i] if head_mask is not None else None
549
+ past_key_value = past_key_values[i] if past_key_values is not None else None
550
+
551
+ if self.gradient_checkpointing and self.training:
552
+ layer_outputs = self._gradient_checkpointing_func(
553
+ layer_module.__call__,
554
+ hidden_states,
555
+ attention_mask,
556
+ layer_head_mask,
557
+ encoder_hidden_states,
558
+ encoder_attention_mask,
559
+ past_key_value,
560
+ output_attentions,
561
+ )
562
+ else:
563
+ layer_outputs = layer_module(
564
+ hidden_states,
565
+ attention_mask,
566
+ layer_head_mask,
567
+ encoder_hidden_states,
568
+ encoder_attention_mask,
569
+ past_key_value,
570
+ output_attentions,
571
+ )
572
+
573
+ # Because we moved the layer-norm to the end of the hidden layer, the data here is not
574
+ # normalized. If normalized states are really needed, apply the final LN to match Transformers' BERT.
575
+
576
+ hidden_states = layer_outputs[0]
577
+ if use_cache:
578
+ next_decoder_cache += (layer_outputs[-1],)
579
+ if output_attentions:
580
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
581
+ if self.config.add_cross_attention:
582
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
583
+
584
+ # Finalize the hidden states.
585
+ hidden_states = self.ln(hidden_states)
586
+
587
+ if output_hidden_states:
588
+ all_hidden_states = all_hidden_states + (hidden_states,)
589
+
590
+ if not return_dict:
591
+ return tuple(
592
+ v
593
+ for v in [
594
+ hidden_states,
595
+ next_decoder_cache,
596
+ all_hidden_states,
597
+ all_self_attentions,
598
+ all_cross_attentions,
599
+ ]
600
+ if v is not None
601
+ )
602
+ return BaseModelOutputWithPastAndCrossAttentions(
603
+ last_hidden_state=hidden_states,
604
+ past_key_values=next_decoder_cache,
605
+ hidden_states=all_hidden_states,
606
+ attentions=all_self_attentions,
607
+ cross_attentions=all_cross_attentions,
608
+ )
609
+
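To make the LayerNorm placement used by the classes above concrete, here is an illustrative sketch (standalone toy modules, not part of this file) of the residual pattern MegatronBert follows, contrasted with standard BERT's post-LN pattern.

    import torch
    from torch import nn

    hidden = 8                                   # toy size, illustration only
    ln = nn.LayerNorm(hidden)
    sublayer = nn.Linear(hidden, hidden)         # stand-in for self-attention or the MLP

    x = torch.randn(2, 5, hidden)
    # MegatronBert (pre-LN): normalize *before* the sublayer and add the raw residual ...
    x = x + sublayer(ln(x))
    # ... then apply one final LayerNorm after the last layer (MegatronBertEncoder.ln).
    x = nn.LayerNorm(hidden)(x)
    # Standard BERT (post-LN) instead computes x = LayerNorm(x + sublayer(x)) inside each sublayer.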
610
+
611
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->MegatronBert
612
+ class MegatronBertPooler(nn.Module):
613
+ def __init__(self, config):
614
+ super().__init__()
615
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
616
+ self.activation = nn.Tanh()
617
+
618
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
619
+ # We "pool" the model by simply taking the hidden state corresponding
620
+ # to the first token.
621
+ first_token_tensor = hidden_states[:, 0]
622
+ pooled_output = self.dense(first_token_tensor)
623
+ pooled_output = self.activation(pooled_output)
624
+ return pooled_output
625
+
626
+
627
+ # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->MegatronBert
628
+ class MegatronBertPredictionHeadTransform(nn.Module):
629
+ def __init__(self, config):
630
+ super().__init__()
631
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
632
+ if isinstance(config.hidden_act, str):
633
+ self.transform_act_fn = ACT2FN[config.hidden_act]
634
+ else:
635
+ self.transform_act_fn = config.hidden_act
636
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
637
+
638
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
639
+ hidden_states = self.dense(hidden_states)
640
+ hidden_states = self.transform_act_fn(hidden_states)
641
+ hidden_states = self.LayerNorm(hidden_states)
642
+ return hidden_states
643
+
644
+
645
+ # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->MegatronBert
646
+ class MegatronBertLMPredictionHead(nn.Module):
647
+ def __init__(self, config):
648
+ super().__init__()
649
+ self.transform = MegatronBertPredictionHeadTransform(config)
650
+
651
+ # The output weights are the same as the input embeddings, but there is
652
+ # an output-only bias for each token.
653
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
654
+
655
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
656
+
657
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
658
+ self.decoder.bias = self.bias
659
+
660
+ def forward(self, hidden_states):
661
+ hidden_states = self.transform(hidden_states)
662
+ hidden_states = self.decoder(hidden_states)
663
+ return hidden_states
664
+
665
+
666
+ # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->MegatronBert
667
+ class MegatronBertOnlyMLMHead(nn.Module):
668
+ def __init__(self, config):
669
+ super().__init__()
670
+ self.predictions = MegatronBertLMPredictionHead(config)
671
+
672
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
673
+ prediction_scores = self.predictions(sequence_output)
674
+ return prediction_scores
675
+
676
+
677
+ # Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->MegatronBert
678
+ class MegatronBertOnlyNSPHead(nn.Module):
679
+ def __init__(self, config):
680
+ super().__init__()
681
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
682
+
683
+ def forward(self, pooled_output):
684
+ seq_relationship_score = self.seq_relationship(pooled_output)
685
+ return seq_relationship_score
686
+
687
+
688
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->MegatronBert
689
+ class MegatronBertPreTrainingHeads(nn.Module):
690
+ def __init__(self, config):
691
+ super().__init__()
692
+ self.predictions = MegatronBertLMPredictionHead(config)
693
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
694
+
695
+ def forward(self, sequence_output, pooled_output):
696
+ prediction_scores = self.predictions(sequence_output)
697
+ seq_relationship_score = self.seq_relationship(pooled_output)
698
+ return prediction_scores, seq_relationship_score
699
+
700
+
701
+ class MegatronBertPreTrainedModel(PreTrainedModel):
702
+ """
703
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
704
+ models.
705
+ """
706
+
707
+ config_class = MegatronBertConfig
708
+ load_tf_weights = load_tf_weights_in_megatron_bert
709
+ base_model_prefix = "bert"
710
+ supports_gradient_checkpointing = True
711
+
712
+ def _init_weights(self, module):
713
+ """Initialize the weights"""
714
+ if isinstance(module, (nn.Linear, nn.Embedding)):
715
+ # Slightly different from the TF version which uses truncated_normal for initialization
716
+ # cf https://github.com/pytorch/pytorch/pull/5617
717
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
718
+ elif isinstance(module, nn.LayerNorm):
719
+ module.bias.data.zero_()
720
+ module.weight.data.fill_(1.0)
721
+ if isinstance(module, nn.Linear) and module.bias is not None:
722
+ module.bias.data.zero_()
723
+
724
+
725
+ @dataclass
726
+ # Copied from transformers.models.bert.modeling_bert.BertForPreTrainingOutput with Bert->MegatronBert
727
+ class MegatronBertForPreTrainingOutput(ModelOutput):
728
+ """
729
+ Output type of [`MegatronBertForPreTraining`].
730
+
731
+ Args:
732
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
733
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
734
+ (classification) loss.
735
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
736
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
737
+ seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
738
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
739
+ before SoftMax).
740
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
741
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
742
+ shape `(batch_size, sequence_length, hidden_size)`.
743
+
744
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
745
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
746
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
747
+ sequence_length)`.
748
+
749
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
750
+ heads.
751
+ """
752
+
753
+ loss: Optional[torch.FloatTensor] = None
754
+ prediction_logits: torch.FloatTensor = None
755
+ seq_relationship_logits: torch.FloatTensor = None
756
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
757
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
758
+
759
+
760
+ MEGATRON_BERT_START_DOCSTRING = r"""
761
+
762
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
763
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
764
+ etc.)
765
+
766
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
767
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
768
+ and behavior.
769
+
770
+ Parameters:
771
+ config ([`MegatronBertConfig`]): Model configuration class with all the parameters of the model.
772
+ Initializing with a config file does not load the weights associated with the model, only the
773
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
774
+ """
775
+
776
+ MEGATRON_BERT_INPUTS_DOCSTRING = r"""
777
+ Args:
778
+ input_ids (`torch.LongTensor` of shape `({0})`):
779
+ Indices of input sequence tokens in the vocabulary.
780
+
781
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
782
+ [`PreTrainedTokenizer.__call__`] for details.
783
+
784
+ [What are input IDs?](../glossary#input-ids)
785
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
786
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
787
+
788
+ - 1 for tokens that are **not masked**,
789
+ - 0 for tokens that are **masked**.
790
+
791
+ [What are attention masks?](../glossary#attention-mask)
792
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
793
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
794
+ 1]`:
795
+
796
+ - 0 corresponds to a *sentence A* token,
797
+ - 1 corresponds to a *sentence B* token.
798
+
799
+ [What are token type IDs?](../glossary#token-type-ids)
800
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
801
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
802
+ config.max_position_embeddings - 1]`.
803
+
804
+ [What are position IDs?](../glossary#position-ids)
805
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
806
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
807
+
808
+ - 1 indicates the head is **not masked**,
809
+ - 0 indicates the head is **masked**.
810
+
811
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
812
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
813
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
814
+ model's internal embedding lookup matrix.
815
+ output_attentions (`bool`, *optional*):
816
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
817
+ tensors for more detail.
818
+ output_hidden_states (`bool`, *optional*):
819
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
820
+ more detail.
821
+ return_dict (`bool`, *optional*):
822
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
823
+ """
824
+
825
+
826
+ @add_start_docstrings(
827
+ "The bare MegatronBert Model transformer outputting raw hidden-states without any specific head on top.",
828
+ MEGATRON_BERT_START_DOCSTRING,
829
+ )
830
+ class MegatronBertModel(MegatronBertPreTrainedModel):
831
+ """
832
+
833
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
834
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
835
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
836
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
837
+
838
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
839
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
840
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
841
+ """
842
+
843
+ def __init__(self, config, add_pooling_layer=True):
844
+ super().__init__(config)
845
+ self.config = config
846
+
847
+ self.embeddings = MegatronBertEmbeddings(config)
848
+ self.encoder = MegatronBertEncoder(config)
849
+
850
+ self.pooler = MegatronBertPooler(config) if add_pooling_layer else None
851
+
852
+ # Initialize weights and apply final processing
853
+ self.post_init()
854
+
855
+ def get_input_embeddings(self):
856
+ return self.embeddings.word_embeddings
857
+
858
+ def set_input_embeddings(self, value):
859
+ self.embeddings.word_embeddings = value
860
+
861
+ def _prune_heads(self, heads_to_prune):
862
+ """
863
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
864
+ class PreTrainedModel
865
+ """
866
+ for layer, heads in heads_to_prune.items():
867
+ self.encoder.layer[layer].attention.prune_heads(heads)
868
+
869
+ @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
870
+ @add_code_sample_docstrings(
871
+ checkpoint=_CHECKPOINT_FOR_DOC,
872
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
873
+ config_class=_CONFIG_FOR_DOC,
874
+ )
875
+ def forward(
876
+ self,
877
+ input_ids: Optional[torch.LongTensor] = None,
878
+ attention_mask: Optional[torch.FloatTensor] = None,
879
+ token_type_ids: Optional[torch.LongTensor] = None,
880
+ position_ids: Optional[torch.LongTensor] = None,
881
+ head_mask: Optional[torch.FloatTensor] = None,
882
+ inputs_embeds: Optional[torch.FloatTensor] = None,
883
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
884
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
885
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
886
+ use_cache: Optional[bool] = None,
887
+ output_attentions: Optional[bool] = None,
888
+ output_hidden_states: Optional[bool] = None,
889
+ return_dict: Optional[bool] = None,
890
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]:
891
+ r"""
892
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
893
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
894
+ the model is configured as a decoder.
895
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
896
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
897
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
898
+
899
+ - 1 for tokens that are **not masked**,
900
+ - 0 for tokens that are **masked**.
901
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
902
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
903
+
904
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
905
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
906
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
907
+ use_cache (`bool`, *optional*):
908
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
909
+ `past_key_values`).
910
+ """
911
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
912
+ output_hidden_states = (
913
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
914
+ )
915
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
916
+
917
+ if self.config.is_decoder:
918
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
919
+ else:
920
+ use_cache = False
921
+
922
+ if input_ids is not None and inputs_embeds is not None:
923
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
924
+ elif input_ids is not None:
925
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
926
+ input_shape = input_ids.size()
927
+ elif inputs_embeds is not None:
928
+ input_shape = inputs_embeds.size()[:-1]
929
+ else:
930
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
931
+
932
+ batch_size, seq_length = input_shape
933
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
934
+
935
+ # past_key_values_length
936
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
937
+
938
+ if attention_mask is None:
939
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
940
+ if token_type_ids is None:
941
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
942
+
943
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
944
+ # ourselves in which case we just need to make it broadcastable to all heads.
945
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
946
+
947
+ # If a 2D or 3D attention mask is provided for the cross-attention
948
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
949
+ if self.config.is_decoder and encoder_hidden_states is not None:
950
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
951
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
952
+ if encoder_attention_mask is None:
953
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
954
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
955
+ else:
956
+ encoder_extended_attention_mask = None
957
+
958
+ # Prepare head mask if needed
959
+ # 1.0 in head_mask indicate we keep the head
960
+ # attention_probs has shape bsz x n_heads x N x N
961
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
962
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
963
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
964
+
965
+ embedding_output = self.embeddings(
966
+ input_ids=input_ids,
967
+ position_ids=position_ids,
968
+ token_type_ids=token_type_ids,
969
+ inputs_embeds=inputs_embeds,
970
+ past_key_values_length=past_key_values_length,
971
+ )
972
+ encoder_outputs = self.encoder(
973
+ embedding_output,
974
+ attention_mask=extended_attention_mask,
975
+ head_mask=head_mask,
976
+ encoder_hidden_states=encoder_hidden_states,
977
+ encoder_attention_mask=encoder_extended_attention_mask,
978
+ past_key_values=past_key_values,
979
+ use_cache=use_cache,
980
+ output_attentions=output_attentions,
981
+ output_hidden_states=output_hidden_states,
982
+ return_dict=return_dict,
983
+ )
984
+ sequence_output = encoder_outputs[0]
985
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
986
+
987
+ if not return_dict:
988
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
989
+
990
+ return BaseModelOutputWithPoolingAndCrossAttentions(
991
+ last_hidden_state=sequence_output,
992
+ pooler_output=pooled_output,
993
+ past_key_values=encoder_outputs.past_key_values,
994
+ hidden_states=encoder_outputs.hidden_states,
995
+ attentions=encoder_outputs.attentions,
996
+ cross_attentions=encoder_outputs.cross_attentions,
997
+ )
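A minimal sketch of how the cache documented above is used in practice: the first call returns `past_key_values`, and the next call passes only the newest token. This assumes a small, randomly initialized `MegatronBertConfig` (hypothetical sizes chosen for speed), not the pretrained 345M checkpoint.

```python
# Hedged sketch: incremental decoding with past_key_values on a tiny, randomly
# initialized decoder-mode MegatronBertModel (hypothetical config sizes).
import torch
from transformers import MegatronBertConfig, MegatronBertModel

config = MegatronBertConfig(
    hidden_size=64, num_hidden_layers=2, num_attention_heads=4,
    intermediate_size=128, is_decoder=True,  # decoder mode enables the cache
)
model = MegatronBertModel(config).eval()

prefix = torch.randint(0, config.vocab_size, (1, 5))
with torch.no_grad():
    first = model(prefix, use_cache=True)                      # full prefix, cache returned
    next_token = torch.randint(0, config.vocab_size, (1, 1))   # only the newest token
    second = model(next_token, past_key_values=first.past_key_values, use_cache=True)

# Each cached tensor has shape (batch, num_heads, seq_len_so_far, head_dim)
print(second.past_key_values[0][0].shape)
```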
998
+
999
+
1000
+ @add_start_docstrings(
1001
+ """
1002
+ MegatronBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
1003
+ `next sentence prediction (classification)` head.
1004
+ """,
1005
+ MEGATRON_BERT_START_DOCSTRING,
1006
+ )
1007
+ class MegatronBertForPreTraining(MegatronBertPreTrainedModel):
1008
+ _tied_weights_keys = ["cls.predictions.decoder"]
1009
+
1010
+ def __init__(self, config, add_binary_head=True):
1011
+ super().__init__(config)
1012
+
1013
+ self.bert = MegatronBertModel(config)
1014
+ self.cls = MegatronBertPreTrainingHeads(config)
1015
+
1016
+ # Initialize weights and apply final processing
1017
+ self.post_init()
1018
+
1019
+ def get_output_embeddings(self):
1020
+ return self.cls.predictions.decoder
1021
+
1022
+ def set_output_embeddings(self, new_embeddings):
1023
+ self.cls.predictions.decoder = new_embeddings
1024
+
1025
+ @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1026
+ @replace_return_docstrings(output_type=MegatronBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1027
+ def forward(
1028
+ self,
1029
+ input_ids: Optional[torch.LongTensor] = None,
1030
+ attention_mask: Optional[torch.FloatTensor] = None,
1031
+ token_type_ids: Optional[torch.LongTensor] = None,
1032
+ position_ids: Optional[torch.LongTensor] = None,
1033
+ head_mask: Optional[torch.FloatTensor] = None,
1034
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1035
+ labels: Optional[torch.LongTensor] = None,
1036
+ next_sentence_label: Optional[torch.LongTensor] = None,
1037
+ output_attentions: Optional[bool] = None,
1038
+ output_hidden_states: Optional[bool] = None,
1039
+ return_dict: Optional[bool] = None,
1040
+ ) -> Union[Tuple, MegatronBertForPreTrainingOutput]:
1041
+ r"""
1042
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1043
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1044
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1045
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1046
+ next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1047
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
1048
+ (see `input_ids` docstring) Indices should be in `[0, 1]`:
1049
+
1050
+ - 0 indicates sequence B is a continuation of sequence A,
1051
+ - 1 indicates sequence B is a random sequence.
1052
+ kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
1053
+ Used to hide legacy arguments that have been deprecated.
1054
+
1055
+ Returns:
1056
+
1057
+ Example:
1058
+
1059
+ ```python
1060
+ >>> from transformers import AutoTokenizer, MegatronBertForPreTraining
1061
+ >>> import torch
1062
+
1063
+ >>> tokenizer = AutoTokenizer.from_pretrained("nvidia/megatron-bert-cased-345m")
1064
+ >>> model = MegatronBertForPreTraining.from_pretrained("nvidia/megatron-bert-cased-345m")
1065
+
1066
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1067
+ >>> outputs = model(**inputs)
1068
+
1069
+ >>> prediction_logits = outputs.prediction_logits
1070
+ >>> seq_relationship_logits = outputs.seq_relationship_logits
1071
+ ```"""
1072
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1073
+
1074
+ outputs = self.bert(
1075
+ input_ids,
1076
+ attention_mask=attention_mask,
1077
+ token_type_ids=token_type_ids,
1078
+ position_ids=position_ids,
1079
+ head_mask=head_mask,
1080
+ inputs_embeds=inputs_embeds,
1081
+ output_attentions=output_attentions,
1082
+ output_hidden_states=output_hidden_states,
1083
+ return_dict=return_dict,
1084
+ )
1085
+
1086
+ sequence_output, pooled_output = outputs[:2]
1087
+ prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
1088
+
1089
+ total_loss = None
1090
+ if labels is not None and next_sentence_label is not None:
1091
+ loss_fct = CrossEntropyLoss()
1092
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1093
+ next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
1094
+ total_loss = masked_lm_loss + next_sentence_loss
1095
+
1096
+ if not return_dict:
1097
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
1098
+ return ((total_loss,) + output) if total_loss is not None else output
1099
+
1100
+ return MegatronBertForPreTrainingOutput(
1101
+ loss=total_loss,
1102
+ prediction_logits=prediction_scores,
1103
+ seq_relationship_logits=seq_relationship_score,
1104
+ hidden_states=outputs.hidden_states,
1105
+ attentions=outputs.attentions,
1106
+ )
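As a quick illustration of the loss combination in the forward above (the MLM loss is added to the NSP loss when both `labels` and `next_sentence_label` are given), here is a hedged sketch on a tiny random configuration; the tensors are synthetic and only demonstrate shapes and label masking.

```python
# Hedged sketch: combined MLM + NSP pretraining loss on a tiny random config.
import torch
from transformers import MegatronBertConfig, MegatronBertForPreTraining

config = MegatronBertConfig(hidden_size=64, num_hidden_layers=2,
                            num_attention_heads=4, intermediate_size=128)
model = MegatronBertForPreTraining(config)

input_ids = torch.randint(0, config.vocab_size, (2, 8))
mlm_labels = input_ids.clone()
mlm_labels[:, ::2] = -100                      # -100 positions are ignored by the MLM loss
nsp_labels = torch.tensor([0, 1])              # 0 = continuation, 1 = random sentence

out = model(input_ids, labels=mlm_labels, next_sentence_label=nsp_labels)
print(out.loss)                                # masked_lm_loss + next_sentence_loss
```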
1107
+
1108
+
1109
+ @add_start_docstrings(
1110
+ """MegatronBert Model with a `language modeling` head on top for CLM fine-tuning.""",
1111
+ MEGATRON_BERT_START_DOCSTRING,
1112
+ )
1113
+ class MegatronBertForCausalLM(MegatronBertPreTrainedModel):
1114
+ _tied_weights_keys = ["cls.predictions.decoder"]
1115
+
1116
+ def __init__(self, config):
1117
+ super().__init__(config)
1118
+
1119
+ if not config.is_decoder:
1120
+ logger.warning("If you want to use `MegatronBertForCausalLM` as a standalone, add `is_decoder=True.`")
1121
+
1122
+ self.bert = MegatronBertModel(config, add_pooling_layer=False)
1123
+ self.cls = MegatronBertOnlyMLMHead(config)
1124
+
1125
+ # Initialize weights and apply final processing
1126
+ self.post_init()
1127
+
1128
+ def get_output_embeddings(self):
1129
+ return self.cls.predictions.decoder
1130
+
1131
+ def set_output_embeddings(self, new_embeddings):
1132
+ self.cls.predictions.decoder = new_embeddings
1133
+
1134
+ @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1135
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
1136
+ def forward(
1137
+ self,
1138
+ input_ids: Optional[torch.LongTensor] = None,
1139
+ attention_mask: Optional[torch.FloatTensor] = None,
1140
+ token_type_ids: Optional[torch.LongTensor] = None,
1141
+ position_ids: Optional[torch.LongTensor] = None,
1142
+ head_mask: Optional[torch.FloatTensor] = None,
1143
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1144
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1145
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1146
+ labels: Optional[torch.LongTensor] = None,
1147
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1148
+ use_cache: Optional[bool] = None,
1149
+ output_attentions: Optional[bool] = None,
1150
+ output_hidden_states: Optional[bool] = None,
1151
+ return_dict: Optional[bool] = None,
1152
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
1153
+ r"""
1154
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1155
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1156
+ the model is configured as a decoder.
1157
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1158
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1159
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1160
+
1161
+ - 1 for tokens that are **not masked**,
1162
+ - 0 for tokens that are **masked**.
1163
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1164
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1165
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
1166
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1167
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1168
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1169
+
1170
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1171
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1172
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1173
+ use_cache (`bool`, *optional*):
1174
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1175
+ `past_key_values`).
1176
+
1177
+ Returns:
1178
+
1179
+ Example:
1180
+
1181
+ ```python
1182
+ >>> from transformers import AutoTokenizer, MegatronBertForCausalLM, MegatronBertConfig
1183
+ >>> import torch
1184
+
1185
+ >>> tokenizer = AutoTokenizer.from_pretrained("nvidia/megatron-bert-cased-345m")
1186
+ >>> model = MegatronBertForCausalLM.from_pretrained("nvidia/megatron-bert-cased-345m", is_decoder=True)
1187
+
1188
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1189
+ >>> outputs = model(**inputs)
1190
+
1191
+ >>> prediction_logits = outputs.logits
1192
+ ```"""
1193
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1194
+ if labels is not None:
1195
+ use_cache = False
1196
+
1197
+ outputs = self.bert(
1198
+ input_ids,
1199
+ attention_mask=attention_mask,
1200
+ token_type_ids=token_type_ids,
1201
+ position_ids=position_ids,
1202
+ head_mask=head_mask,
1203
+ inputs_embeds=inputs_embeds,
1204
+ encoder_hidden_states=encoder_hidden_states,
1205
+ encoder_attention_mask=encoder_attention_mask,
1206
+ past_key_values=past_key_values,
1207
+ use_cache=use_cache,
1208
+ output_attentions=output_attentions,
1209
+ output_hidden_states=output_hidden_states,
1210
+ return_dict=return_dict,
1211
+ )
1212
+
1213
+ sequence_output = outputs[0]
1214
+ prediction_scores = self.cls(sequence_output)
1215
+
1216
+ lm_loss = None
1217
+ if labels is not None:
1218
+ # we are doing next-token prediction; shift prediction scores and input ids by one
1219
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
1220
+ labels = labels[:, 1:].contiguous()
1221
+ loss_fct = CrossEntropyLoss()
1222
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1223
+
1224
+ if not return_dict:
1225
+ output = (prediction_scores,) + outputs[2:]
1226
+ return ((lm_loss,) + output) if lm_loss is not None else output
1227
+
1228
+ return CausalLMOutputWithCrossAttentions(
1229
+ loss=lm_loss,
1230
+ logits=prediction_scores,
1231
+ past_key_values=outputs.past_key_values,
1232
+ hidden_states=outputs.hidden_states,
1233
+ attentions=outputs.attentions,
1234
+ cross_attentions=outputs.cross_attentions,
1235
+ )
1236
+
1237
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
1238
+ input_shape = input_ids.shape
1239
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1240
+ if attention_mask is None:
1241
+ attention_mask = input_ids.new_ones(input_shape)
1242
+
1243
+ # cut decoder_input_ids if past_key_values is used
1244
+ if past_key_values is not None:
1245
+ past_length = past_key_values[0][0].shape[2]
1246
+
1247
+ # Some generation methods already pass only the last input ID
1248
+ if input_ids.shape[1] > past_length:
1249
+ remove_prefix_length = past_length
1250
+ else:
1251
+ # Default to old behavior: keep only final ID
1252
+ remove_prefix_length = input_ids.shape[1] - 1
1253
+
1254
+ input_ids = input_ids[:, remove_prefix_length:]
1255
+
1256
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
1257
+
1258
+ def _reorder_cache(self, past_key_values, beam_idx):
1259
+ reordered_past = ()
1260
+ for layer_past in past_key_values:
1261
+ reordered_past += (
1262
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1263
+ )
1264
+ return reordered_past
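Because the class defines `prepare_inputs_for_generation` and `_reorder_cache`, it plugs into the standard `generate()` loop, which trims the input to the newest token once a cache exists and reorders the cache during beam search. A hedged sketch with a tiny random decoder config (illustrative only; the output tokens are meaningless without trained weights):

```python
# Hedged sketch: cached beam-search generation with a tiny random decoder config.
import torch
from transformers import MegatronBertConfig, MegatronBertForCausalLM

config = MegatronBertConfig(hidden_size=64, num_hidden_layers=2,
                            num_attention_heads=4, intermediate_size=128,
                            is_decoder=True)
model = MegatronBertForCausalLM(config).eval()

prompt = torch.randint(0, config.vocab_size, (1, 4))
generated = model.generate(prompt, max_new_tokens=5, num_beams=2, use_cache=True)
print(generated.shape)  # typically (1, 9): 4 prompt tokens + 5 generated ones
```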
1265
+
1266
+
1267
+ @add_start_docstrings("""MegatronBert Model with a `language modeling` head on top.""", MEGATRON_BERT_START_DOCSTRING)
1268
+ class MegatronBertForMaskedLM(MegatronBertPreTrainedModel):
1269
+ _tied_weights_keys = ["cls.predictions.decoder"]
1270
+
1271
+ def __init__(self, config):
1272
+ super().__init__(config)
1273
+
1274
+ if config.is_decoder:
1275
+ logger.warning(
1276
+ "If you want to use `MegatronBertForMaskedLM` make sure `config.is_decoder=False` for "
1277
+ "bi-directional self-attention."
1278
+ )
1279
+
1280
+ self.bert = MegatronBertModel(config, add_pooling_layer=False)
1281
+ self.cls = MegatronBertOnlyMLMHead(config)
1282
+
1283
+ # Initialize weights and apply final processing
1284
+ self.post_init()
1285
+
1286
+ def get_output_embeddings(self):
1287
+ return self.cls.predictions.decoder
1288
+
1289
+ def set_output_embeddings(self, new_embeddings):
1290
+ self.cls.predictions.decoder = new_embeddings
1291
+
1292
+ @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1293
+ @add_code_sample_docstrings(
1294
+ checkpoint=_CHECKPOINT_FOR_DOC,
1295
+ output_type=MaskedLMOutput,
1296
+ config_class=_CONFIG_FOR_DOC,
1297
+ )
1298
+ def forward(
1299
+ self,
1300
+ input_ids: Optional[torch.LongTensor] = None,
1301
+ attention_mask: Optional[torch.FloatTensor] = None,
1302
+ token_type_ids: Optional[torch.LongTensor] = None,
1303
+ position_ids: Optional[torch.LongTensor] = None,
1304
+ head_mask: Optional[torch.FloatTensor] = None,
1305
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1306
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1307
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1308
+ labels: Optional[torch.LongTensor] = None,
1309
+ output_attentions: Optional[bool] = None,
1310
+ output_hidden_states: Optional[bool] = None,
1311
+ return_dict: Optional[bool] = None,
1312
+ ) -> Union[Tuple, MaskedLMOutput]:
1313
+ r"""
1314
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1315
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1316
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1317
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1318
+ """
1319
+
1320
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1321
+
1322
+ outputs = self.bert(
1323
+ input_ids,
1324
+ attention_mask=attention_mask,
1325
+ token_type_ids=token_type_ids,
1326
+ position_ids=position_ids,
1327
+ head_mask=head_mask,
1328
+ inputs_embeds=inputs_embeds,
1329
+ encoder_hidden_states=encoder_hidden_states,
1330
+ encoder_attention_mask=encoder_attention_mask,
1331
+ output_attentions=output_attentions,
1332
+ output_hidden_states=output_hidden_states,
1333
+ return_dict=return_dict,
1334
+ )
1335
+
1336
+ sequence_output = outputs[0]
1337
+ prediction_scores = self.cls(sequence_output)
1338
+
1339
+ masked_lm_loss = None
1340
+ if labels is not None:
1341
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
1342
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1343
+
1344
+ if not return_dict:
1345
+ output = (prediction_scores,) + outputs[2:]
1346
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1347
+
1348
+ return MaskedLMOutput(
1349
+ loss=masked_lm_loss,
1350
+ logits=prediction_scores,
1351
+ hidden_states=outputs.hidden_states,
1352
+ attentions=outputs.attentions,
1353
+ )
1354
+
1355
+ def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
1356
+ input_shape = input_ids.shape
1357
+ effective_batch_size = input_shape[0]
1358
+
1359
+ # add a dummy token
1360
+ if self.config.pad_token_id is None:
1361
+ raise ValueError("The PAD token should be defined for generation")
1362
+ attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
1363
+ dummy_token = torch.full(
1364
+ (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
1365
+ )
1366
+ input_ids = torch.cat([input_ids, dummy_token], dim=1)
1367
+
1368
+ return {"input_ids": input_ids, "attention_mask": attention_mask}
1369
+
1370
+
1371
+ @add_start_docstrings(
1372
+ """MegatronBert Model with a `next sentence prediction (classification)` head on top.""",
1373
+ MEGATRON_BERT_START_DOCSTRING,
1374
+ )
1375
+ class MegatronBertForNextSentencePrediction(MegatronBertPreTrainedModel):
1376
+ def __init__(self, config):
1377
+ super().__init__(config)
1378
+
1379
+ self.bert = MegatronBertModel(config)
1380
+ self.cls = MegatronBertOnlyNSPHead(config)
1381
+
1382
+ # Initialize weights and apply final processing
1383
+ self.post_init()
1384
+
1385
+ @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1386
+ @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
1387
+ def forward(
1388
+ self,
1389
+ input_ids: Optional[torch.LongTensor] = None,
1390
+ attention_mask: Optional[torch.FloatTensor] = None,
1391
+ token_type_ids: Optional[torch.LongTensor] = None,
1392
+ position_ids: Optional[torch.LongTensor] = None,
1393
+ head_mask: Optional[torch.FloatTensor] = None,
1394
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1395
+ labels: Optional[torch.LongTensor] = None,
1396
+ output_attentions: Optional[bool] = None,
1397
+ output_hidden_states: Optional[bool] = None,
1398
+ return_dict: Optional[bool] = None,
1399
+ **kwargs,
1400
+ ) -> Union[Tuple, NextSentencePredictorOutput]:
1401
+ r"""
1402
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1403
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
1404
+ (see `input_ids` docstring). Indices should be in `[0, 1]`:
1405
+
1406
+ - 0 indicates sequence B is a continuation of sequence A,
1407
+ - 1 indicates sequence B is a random sequence.
1408
+
1409
+ Returns:
1410
+
1411
+ Example:
1412
+
1413
+ ```python
1414
+ >>> from transformers import AutoTokenizer, MegatronBertForNextSentencePrediction
1415
+ >>> import torch
1416
+
1417
+ >>> tokenizer = AutoTokenizer.from_pretrained("nvidia/megatron-bert-cased-345m")
1418
+ >>> model = MegatronBertForNextSentencePrediction.from_pretrained("nvidia/megatron-bert-cased-345m")
1419
+
1420
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1421
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
1422
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
1423
+
1424
+ >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
1425
+ >>> logits = outputs.logits
1426
+ >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
1427
+ ```"""
1428
+
1429
+ if "next_sentence_label" in kwargs:
1430
+ warnings.warn(
1431
+ "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
1432
+ " `labels` instead.",
1433
+ FutureWarning,
1434
+ )
1435
+ labels = kwargs.pop("next_sentence_label")
1436
+
1437
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1438
+
1439
+ outputs = self.bert(
1440
+ input_ids,
1441
+ attention_mask=attention_mask,
1442
+ token_type_ids=token_type_ids,
1443
+ position_ids=position_ids,
1444
+ head_mask=head_mask,
1445
+ inputs_embeds=inputs_embeds,
1446
+ output_attentions=output_attentions,
1447
+ output_hidden_states=output_hidden_states,
1448
+ return_dict=return_dict,
1449
+ )
1450
+
1451
+ pooled_output = outputs[1]
1452
+
1453
+ seq_relationship_scores = self.cls(pooled_output)
1454
+
1455
+ next_sentence_loss = None
1456
+ if labels is not None:
1457
+ loss_fct = CrossEntropyLoss()
1458
+ next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
1459
+
1460
+ if not return_dict:
1461
+ output = (seq_relationship_scores,) + outputs[2:]
1462
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
1463
+
1464
+ return NextSentencePredictorOutput(
1465
+ loss=next_sentence_loss,
1466
+ logits=seq_relationship_scores,
1467
+ hidden_states=outputs.hidden_states,
1468
+ attentions=outputs.attentions,
1469
+ )
1470
+
1471
+
1472
+ @add_start_docstrings(
1473
+ """
1474
+ MegatronBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1475
+ pooled output) e.g. for GLUE tasks.
1476
+ """,
1477
+ MEGATRON_BERT_START_DOCSTRING,
1478
+ )
1479
+ class MegatronBertForSequenceClassification(MegatronBertPreTrainedModel):
1480
+ def __init__(self, config):
1481
+ super().__init__(config)
1482
+ self.num_labels = config.num_labels
1483
+
1484
+ self.bert = MegatronBertModel(config)
1485
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1486
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1487
+
1488
+ # Initialize weights and apply final processing
1489
+ self.post_init()
1490
+
1491
+ @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1492
+ @add_code_sample_docstrings(
1493
+ checkpoint=_CHECKPOINT_FOR_DOC,
1494
+ output_type=SequenceClassifierOutput,
1495
+ config_class=_CONFIG_FOR_DOC,
1496
+ )
1497
+ def forward(
1498
+ self,
1499
+ input_ids: Optional[torch.LongTensor] = None,
1500
+ attention_mask: Optional[torch.FloatTensor] = None,
1501
+ token_type_ids: Optional[torch.LongTensor] = None,
1502
+ position_ids: Optional[torch.LongTensor] = None,
1503
+ head_mask: Optional[torch.FloatTensor] = None,
1504
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1505
+ labels: Optional[torch.LongTensor] = None,
1506
+ output_attentions: Optional[bool] = None,
1507
+ output_hidden_states: Optional[bool] = None,
1508
+ return_dict: Optional[bool] = None,
1509
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1510
+ r"""
1511
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1512
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1513
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1514
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1515
+ """
1516
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1517
+
1518
+ outputs = self.bert(
1519
+ input_ids,
1520
+ attention_mask=attention_mask,
1521
+ token_type_ids=token_type_ids,
1522
+ position_ids=position_ids,
1523
+ head_mask=head_mask,
1524
+ inputs_embeds=inputs_embeds,
1525
+ output_attentions=output_attentions,
1526
+ output_hidden_states=output_hidden_states,
1527
+ return_dict=return_dict,
1528
+ )
1529
+
1530
+ pooled_output = outputs[1]
1531
+
1532
+ pooled_output = self.dropout(pooled_output)
1533
+ logits = self.classifier(pooled_output)
1534
+
1535
+ loss = None
1536
+ if labels is not None:
1537
+ if self.config.problem_type is None:
1538
+ if self.num_labels == 1:
1539
+ self.config.problem_type = "regression"
1540
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1541
+ self.config.problem_type = "single_label_classification"
1542
+ else:
1543
+ self.config.problem_type = "multi_label_classification"
1544
+
1545
+ if self.config.problem_type == "regression":
1546
+ loss_fct = MSELoss()
1547
+ if self.num_labels == 1:
1548
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1549
+ else:
1550
+ loss = loss_fct(logits, labels)
1551
+ elif self.config.problem_type == "single_label_classification":
1552
+ loss_fct = CrossEntropyLoss()
1553
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1554
+ elif self.config.problem_type == "multi_label_classification":
1555
+ loss_fct = BCEWithLogitsLoss()
1556
+ loss = loss_fct(logits, labels)
1557
+ if not return_dict:
1558
+ output = (logits,) + outputs[2:]
1559
+ return ((loss,) + output) if loss is not None else output
1560
+
1561
+ return SequenceClassifierOutput(
1562
+ loss=loss,
1563
+ logits=logits,
1564
+ hidden_states=outputs.hidden_states,
1565
+ attentions=outputs.attentions,
1566
+ )
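The loss branch above is picked from `config.problem_type`, `num_labels`, and the label dtype. A hedged sketch with tiny random configs showing the multi-label (BCE) and single-label (cross-entropy) paths:

```python
# Hedged sketch: how problem_type selects the loss in the classification head above.
import torch
from transformers import MegatronBertConfig, MegatronBertForSequenceClassification

config = MegatronBertConfig(hidden_size=64, num_hidden_layers=2,
                            num_attention_heads=4, intermediate_size=128,
                            num_labels=3, problem_type="multi_label_classification")
model = MegatronBertForSequenceClassification(config)

input_ids = torch.randint(0, config.vocab_size, (2, 8))
multi_hot = torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])   # float targets -> BCEWithLogitsLoss
print(model(input_ids, labels=multi_hot).loss)

# With integer labels and problem_type unset, num_labels > 1 falls back to CrossEntropyLoss:
config2 = MegatronBertConfig(hidden_size=64, num_hidden_layers=2,
                             num_attention_heads=4, intermediate_size=128, num_labels=3)
model2 = MegatronBertForSequenceClassification(config2)
print(model2(input_ids, labels=torch.tensor([0, 2])).loss)
```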
1567
+
1568
+
1569
+ @add_start_docstrings(
1570
+ """
1571
+ MegatronBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output
1572
+ and a softmax) e.g. for RocStories/SWAG tasks.
1573
+ """,
1574
+ MEGATRON_BERT_START_DOCSTRING,
1575
+ )
1576
+ class MegatronBertForMultipleChoice(MegatronBertPreTrainedModel):
1577
+ def __init__(self, config):
1578
+ super().__init__(config)
1579
+
1580
+ self.bert = MegatronBertModel(config)
1581
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1582
+ self.classifier = nn.Linear(config.hidden_size, 1)
1583
+
1584
+ # Initialize weights and apply final processing
1585
+ self.post_init()
1586
+
1587
+ @add_start_docstrings_to_model_forward(
1588
+ MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1589
+ )
1590
+ @add_code_sample_docstrings(
1591
+ checkpoint=_CHECKPOINT_FOR_DOC,
1592
+ output_type=MultipleChoiceModelOutput,
1593
+ config_class=_CONFIG_FOR_DOC,
1594
+ )
1595
+ def forward(
1596
+ self,
1597
+ input_ids: Optional[torch.LongTensor] = None,
1598
+ attention_mask: Optional[torch.FloatTensor] = None,
1599
+ token_type_ids: Optional[torch.LongTensor] = None,
1600
+ position_ids: Optional[torch.LongTensor] = None,
1601
+ head_mask: Optional[torch.FloatTensor] = None,
1602
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1603
+ labels: Optional[torch.LongTensor] = None,
1604
+ output_attentions: Optional[bool] = None,
1605
+ output_hidden_states: Optional[bool] = None,
1606
+ return_dict: Optional[bool] = None,
1607
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
1608
+ r"""
1609
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1610
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1611
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1612
+ `input_ids` above)
1613
+ """
1614
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1615
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1616
+
1617
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1618
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1619
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1620
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1621
+ inputs_embeds = (
1622
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1623
+ if inputs_embeds is not None
1624
+ else None
1625
+ )
1626
+
1627
+ outputs = self.bert(
1628
+ input_ids,
1629
+ attention_mask=attention_mask,
1630
+ token_type_ids=token_type_ids,
1631
+ position_ids=position_ids,
1632
+ head_mask=head_mask,
1633
+ inputs_embeds=inputs_embeds,
1634
+ output_attentions=output_attentions,
1635
+ output_hidden_states=output_hidden_states,
1636
+ return_dict=return_dict,
1637
+ )
1638
+
1639
+ pooled_output = outputs[1]
1640
+
1641
+ pooled_output = self.dropout(pooled_output)
1642
+ logits = self.classifier(pooled_output)
1643
+ reshaped_logits = logits.view(-1, num_choices)
1644
+
1645
+ loss = None
1646
+ if labels is not None:
1647
+ loss_fct = CrossEntropyLoss()
1648
+ loss = loss_fct(reshaped_logits, labels)
1649
+
1650
+ if not return_dict:
1651
+ output = (reshaped_logits,) + outputs[2:]
1652
+ return ((loss,) + output) if loss is not None else output
1653
+
1654
+ return MultipleChoiceModelOutput(
1655
+ loss=loss,
1656
+ logits=reshaped_logits,
1657
+ hidden_states=outputs.hidden_states,
1658
+ attentions=outputs.attentions,
1659
+ )
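Multiple-choice inputs arrive as `(batch_size, num_choices, sequence_length)`; the forward pass above flattens them, scores each choice, and reshapes the logits back to `(batch_size, num_choices)`. A hedged sketch with a tiny random config:

```python
# Hedged sketch: multiple-choice input layout and reshaped logits (tiny random config).
import torch
from transformers import MegatronBertConfig, MegatronBertForMultipleChoice

config = MegatronBertConfig(hidden_size=64, num_hidden_layers=2,
                            num_attention_heads=4, intermediate_size=128)
model = MegatronBertForMultipleChoice(config)

batch_size, num_choices, seq_len = 2, 4, 8
input_ids = torch.randint(0, config.vocab_size, (batch_size, num_choices, seq_len))
labels = torch.tensor([1, 3])                  # index of the correct choice per example

out = model(input_ids, labels=labels)
print(out.logits.shape)                        # (2, 4): one score per choice
```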
1660
+
1661
+
1662
+ @add_start_docstrings(
1663
+ """
1664
+ MegatronBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
1665
+ for Named-Entity-Recognition (NER) tasks.
1666
+ """,
1667
+ MEGATRON_BERT_START_DOCSTRING,
1668
+ )
1669
+ class MegatronBertForTokenClassification(MegatronBertPreTrainedModel):
1670
+ def __init__(self, config):
1671
+ super().__init__(config)
1672
+ self.num_labels = config.num_labels
1673
+
1674
+ self.bert = MegatronBertModel(config, add_pooling_layer=False)
1675
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1676
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1677
+
1678
+ # Initialize weights and apply final processing
1679
+ self.post_init()
1680
+
1681
+ @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1682
+ @add_code_sample_docstrings(
1683
+ checkpoint=_CHECKPOINT_FOR_DOC,
1684
+ output_type=TokenClassifierOutput,
1685
+ config_class=_CONFIG_FOR_DOC,
1686
+ )
1687
+ def forward(
1688
+ self,
1689
+ input_ids: Optional[torch.LongTensor] = None,
1690
+ attention_mask: Optional[torch.FloatTensor] = None,
1691
+ token_type_ids: Optional[torch.LongTensor] = None,
1692
+ position_ids: Optional[torch.LongTensor] = None,
1693
+ head_mask: Optional[torch.FloatTensor] = None,
1694
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1695
+ labels: Optional[torch.LongTensor] = None,
1696
+ output_attentions: Optional[bool] = None,
1697
+ output_hidden_states: Optional[bool] = None,
1698
+ return_dict: Optional[bool] = None,
1699
+ ) -> Union[Tuple, TokenClassifierOutput]:
1700
+ r"""
1701
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1702
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1703
+ """
1704
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1705
+
1706
+ outputs = self.bert(
1707
+ input_ids,
1708
+ attention_mask=attention_mask,
1709
+ token_type_ids=token_type_ids,
1710
+ position_ids=position_ids,
1711
+ head_mask=head_mask,
1712
+ inputs_embeds=inputs_embeds,
1713
+ output_attentions=output_attentions,
1714
+ output_hidden_states=output_hidden_states,
1715
+ return_dict=return_dict,
1716
+ )
1717
+
1718
+ sequence_output = outputs[0]
1719
+
1720
+ sequence_output = self.dropout(sequence_output)
1721
+ logits = self.classifier(sequence_output)
1722
+
1723
+ loss = None
1724
+ if labels is not None:
1725
+ loss_fct = CrossEntropyLoss()
1726
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1727
+
1728
+ if not return_dict:
1729
+ output = (logits,) + outputs[2:]
1730
+ return ((loss,) + output) if loss is not None else output
1731
+
1732
+ return TokenClassifierOutput(
1733
+ loss=loss,
1734
+ logits=logits,
1735
+ hidden_states=outputs.hidden_states,
1736
+ attentions=outputs.attentions,
1737
+ )
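A short sketch of the token-classification head on a tiny random config; label positions set to `-100` are ignored by `CrossEntropyLoss` (its default `ignore_index`), which is how sub-word continuations are typically masked out.

```python
# Hedged sketch: per-token logits with -100 label positions ignored (tiny random config).
import torch
from transformers import MegatronBertConfig, MegatronBertForTokenClassification

config = MegatronBertConfig(hidden_size=64, num_hidden_layers=2,
                            num_attention_heads=4, intermediate_size=128, num_labels=5)
model = MegatronBertForTokenClassification(config)

input_ids = torch.randint(0, config.vocab_size, (1, 6))
labels = torch.tensor([[0, 2, -100, 1, -100, 4]])
out = model(input_ids, labels=labels)
print(out.logits.shape, out.loss)              # (1, 6, 5), scalar loss
```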
1738
+
1739
+
1740
+ @add_start_docstrings(
1741
+ """
1742
+ MegatronBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
1743
+ linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1744
+ """,
1745
+ MEGATRON_BERT_START_DOCSTRING,
1746
+ )
1747
+ class MegatronBertForQuestionAnswering(MegatronBertPreTrainedModel):
1748
+ def __init__(self, config):
1749
+ super().__init__(config)
1750
+ self.num_labels = config.num_labels
1751
+
1752
+ self.bert = MegatronBertModel(config, add_pooling_layer=False)
1753
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1754
+
1755
+ # Initialize weights and apply final processing
1756
+ self.post_init()
1757
+
1758
+ @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1759
+ @add_code_sample_docstrings(
1760
+ checkpoint=_CHECKPOINT_FOR_DOC,
1761
+ output_type=QuestionAnsweringModelOutput,
1762
+ config_class=_CONFIG_FOR_DOC,
1763
+ )
1764
+ def forward(
1765
+ self,
1766
+ input_ids: Optional[torch.LongTensor] = None,
1767
+ attention_mask: Optional[torch.FloatTensor] = None,
1768
+ token_type_ids: Optional[torch.LongTensor] = None,
1769
+ position_ids: Optional[torch.LongTensor] = None,
1770
+ head_mask: Optional[torch.FloatTensor] = None,
1771
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1772
+ start_positions: Optional[torch.LongTensor] = None,
1773
+ end_positions: Optional[torch.LongTensor] = None,
1774
+ output_attentions: Optional[bool] = None,
1775
+ output_hidden_states: Optional[bool] = None,
1776
+ return_dict: Optional[bool] = None,
1777
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1778
+ r"""
1779
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1780
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1781
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1782
+ are not taken into account for computing the loss.
1783
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1784
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1785
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1786
+ are not taken into account for computing the loss.
1787
+ """
1788
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1789
+
1790
+ outputs = self.bert(
1791
+ input_ids,
1792
+ attention_mask=attention_mask,
1793
+ token_type_ids=token_type_ids,
1794
+ position_ids=position_ids,
1795
+ head_mask=head_mask,
1796
+ inputs_embeds=inputs_embeds,
1797
+ output_attentions=output_attentions,
1798
+ output_hidden_states=output_hidden_states,
1799
+ return_dict=return_dict,
1800
+ )
1801
+
1802
+ sequence_output = outputs[0]
1803
+
1804
+ logits = self.qa_outputs(sequence_output)
1805
+ start_logits, end_logits = logits.split(1, dim=-1)
1806
+ start_logits = start_logits.squeeze(-1).contiguous()
1807
+ end_logits = end_logits.squeeze(-1).contiguous()
1808
+
1809
+ total_loss = None
1810
+ if start_positions is not None and end_positions is not None:
1811
+ # If we are on multi-GPU, gathered positions may carry an extra dimension; squeeze it away
1812
+ if len(start_positions.size()) > 1:
1813
+ start_positions = start_positions.squeeze(-1)
1814
+ if len(end_positions.size()) > 1:
1815
+ end_positions = end_positions.squeeze(-1)
1816
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1817
+ ignored_index = start_logits.size(1)
1818
+ start_positions = start_positions.clamp(0, ignored_index)
1819
+ end_positions = end_positions.clamp(0, ignored_index)
1820
+
1821
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1822
+ start_loss = loss_fct(start_logits, start_positions)
1823
+ end_loss = loss_fct(end_logits, end_positions)
1824
+ total_loss = (start_loss + end_loss) / 2
1825
+
1826
+ if not return_dict:
1827
+ output = (start_logits, end_logits) + outputs[2:]
1828
+ return ((total_loss,) + output) if total_loss is not None else output
1829
+
1830
+ return QuestionAnsweringModelOutput(
1831
+ loss=total_loss,
1832
+ start_logits=start_logits,
1833
+ end_logits=end_logits,
1834
+ hidden_states=outputs.hidden_states,
1835
+ attentions=outputs.attentions,
1836
+ )
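A hedged sketch of reading a span off the start/end logits produced above, using a tiny random config (the predicted indices are meaningless without trained weights):

```python
# Hedged sketch: extracting an answer span from the QA head's logits (tiny random config).
import torch
from transformers import MegatronBertConfig, MegatronBertForQuestionAnswering

config = MegatronBertConfig(hidden_size=64, num_hidden_layers=2,
                            num_attention_heads=4, intermediate_size=128, num_labels=2)
model = MegatronBertForQuestionAnswering(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 10))
with torch.no_grad():
    out = model(input_ids)
start = out.start_logits.argmax(-1).item()
end = out.end_logits.argmax(-1).item()
print(start, end)                              # predicted answer span indices
```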
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__init__.py ADDED
@@ -0,0 +1,85 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_mobilenet_v1": [
21
+ "MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "MobileNetV1Config",
23
+ "MobileNetV1OnnxConfig",
24
+ ],
25
+ }
26
+
27
+ try:
28
+ if not is_vision_available():
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ pass
32
+ else:
33
+ _import_structure["feature_extraction_mobilenet_v1"] = ["MobileNetV1FeatureExtractor"]
34
+ _import_structure["image_processing_mobilenet_v1"] = ["MobileNetV1ImageProcessor"]
35
+
36
+ try:
37
+ if not is_torch_available():
38
+ raise OptionalDependencyNotAvailable()
39
+ except OptionalDependencyNotAvailable:
40
+ pass
41
+ else:
42
+ _import_structure["modeling_mobilenet_v1"] = [
43
+ "MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST",
44
+ "MobileNetV1ForImageClassification",
45
+ "MobileNetV1Model",
46
+ "MobileNetV1PreTrainedModel",
47
+ "load_tf_weights_in_mobilenet_v1",
48
+ ]
49
+
50
+
51
+ if TYPE_CHECKING:
52
+ from .configuration_mobilenet_v1 import (
53
+ MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP,
54
+ MobileNetV1Config,
55
+ MobileNetV1OnnxConfig,
56
+ )
57
+
58
+ try:
59
+ if not is_vision_available():
60
+ raise OptionalDependencyNotAvailable()
61
+ except OptionalDependencyNotAvailable:
62
+ pass
63
+ else:
64
+ from .feature_extraction_mobilenet_v1 import MobileNetV1FeatureExtractor
65
+ from .image_processing_mobilenet_v1 import MobileNetV1ImageProcessor
66
+
67
+ try:
68
+ if not is_torch_available():
69
+ raise OptionalDependencyNotAvailable()
70
+ except OptionalDependencyNotAvailable:
71
+ pass
72
+ else:
73
+ from .modeling_mobilenet_v1 import (
74
+ MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST,
75
+ MobileNetV1ForImageClassification,
76
+ MobileNetV1Model,
77
+ MobileNetV1PreTrainedModel,
78
+ load_tf_weights_in_mobilenet_v1,
79
+ )
80
+
81
+
82
+ else:
83
+ import sys
84
+
85
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
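The lazy `_import_structure` above means the public symbols resolve on first access from the top-level `transformers` package. A hedged sketch of the import paths, assuming both `torch` and the vision extras are installed:

```python
# Hedged sketch: the public MobileNetV1 entry points exposed by the lazy module above.
from transformers import MobileNetV1Config, MobileNetV1ImageProcessor, MobileNetV1Model

config = MobileNetV1Config(depth_multiplier=0.5, image_size=192)
model = MobileNetV1Model(config)               # requires torch
processor = MobileNetV1ImageProcessor()        # requires the vision extras
print(type(model).__name__, processor.size)
```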
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.38 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/configuration_mobilenet_v1.cpython-310.pyc ADDED
Binary file (4.88 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/convert_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (3.88 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/feature_extraction_mobilenet_v1.cpython-310.pyc ADDED
Binary file (1.07 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/image_processing_mobilenet_v1.cpython-310.pyc ADDED
Binary file (12.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/modeling_mobilenet_v1.cpython-310.pyc ADDED
Binary file (13.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py ADDED
@@ -0,0 +1,126 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ MobileNetV1 model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from packaging import version
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...onnx import OnnxConfig
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ from ..deprecated._archive_maps import MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
31
+
32
+
33
+ class MobileNetV1Config(PretrainedConfig):
34
+ r"""
35
+ This is the configuration class to store the configuration of a [`MobileNetV1Model`]. It is used to instantiate a
36
+ MobileNetV1 model according to the specified arguments, defining the model architecture. Instantiating a
37
+ configuration with the defaults will yield a similar configuration to that of the MobileNetV1
38
+ [google/mobilenet_v1_1.0_224](https://huggingface.co/google/mobilenet_v1_1.0_224) architecture.
39
+
40
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
41
+ documentation from [`PretrainedConfig`] for more information.
42
+
43
+ Args:
44
+ num_channels (`int`, *optional*, defaults to 3):
45
+ The number of input channels.
46
+ image_size (`int`, *optional*, defaults to 224):
47
+ The size (resolution) of each image.
48
+ depth_multiplier (`float`, *optional*, defaults to 1.0):
49
+ Shrinks or expands the number of channels in each layer. Default is 1.0, which starts the network with 32
50
+ channels. This is sometimes also called "alpha" or "width multiplier".
51
+ min_depth (`int`, *optional*, defaults to 8):
52
+ All layers will have at least this many channels.
53
+ hidden_act (`str` or `function`, *optional*, defaults to `"relu6"`):
54
+ The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
55
+ tf_padding (`bool`, *optional*, defaults to `True`):
56
+ Whether to use TensorFlow padding rules on the convolution layers.
57
+ classifier_dropout_prob (`float`, *optional*, defaults to 0.999):
58
+ The dropout ratio for attached classifiers.
59
+ initializer_range (`float`, *optional*, defaults to 0.02):
60
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
61
+ layer_norm_eps (`float`, *optional*, defaults to 0.001):
62
+ The epsilon used by the layer normalization layers.
63
+
64
+ Example:
65
+
66
+ ```python
67
+ >>> from transformers import MobileNetV1Config, MobileNetV1Model
68
+
69
+ >>> # Initializing a "mobilenet_v1_1.0_224" style configuration
70
+ >>> configuration = MobileNetV1Config()
71
+
72
+ >>> # Initializing a model from the "mobilenet_v1_1.0_224" style configuration
73
+ >>> model = MobileNetV1Model(configuration)
74
+
75
+ >>> # Accessing the model configuration
76
+ >>> configuration = model.config
77
+ ```"""
78
+
79
+ model_type = "mobilenet_v1"
80
+
81
+ def __init__(
82
+ self,
83
+ num_channels=3,
84
+ image_size=224,
85
+ depth_multiplier=1.0,
86
+ min_depth=8,
87
+ hidden_act="relu6",
88
+ tf_padding=True,
89
+ classifier_dropout_prob=0.999,
90
+ initializer_range=0.02,
91
+ layer_norm_eps=0.001,
92
+ **kwargs,
93
+ ):
94
+ super().__init__(**kwargs)
95
+
96
+ if depth_multiplier <= 0:
97
+ raise ValueError("depth_multiplier must be greater than zero.")
98
+
99
+ self.num_channels = num_channels
100
+ self.image_size = image_size
101
+ self.depth_multiplier = depth_multiplier
102
+ self.min_depth = min_depth
103
+ self.hidden_act = hidden_act
104
+ self.tf_padding = tf_padding
105
+ self.classifier_dropout_prob = classifier_dropout_prob
106
+ self.initializer_range = initializer_range
107
+ self.layer_norm_eps = layer_norm_eps
108
+
109
+
110
+ class MobileNetV1OnnxConfig(OnnxConfig):
111
+ torch_onnx_minimum_version = version.parse("1.11")
112
+
113
+ @property
114
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
115
+ return OrderedDict([("pixel_values", {0: "batch"})])
116
+
117
+ @property
118
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
119
+ if self.task == "image-classification":
120
+ return OrderedDict([("logits", {0: "batch"})])
121
+ else:
122
+ return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])
123
+
124
+ @property
125
+ def atol_for_validation(self) -> float:
126
+ return 1e-4
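A hedged sketch of inspecting the ONNX export metadata defined by `MobileNetV1OnnxConfig` above (no export is actually run here):

```python
# Hedged sketch: reading the input/output axes and validation tolerance for ONNX export.
from transformers import MobileNetV1Config
from transformers.models.mobilenet_v1.configuration_mobilenet_v1 import MobileNetV1OnnxConfig

config = MobileNetV1Config()
onnx_config = MobileNetV1OnnxConfig(config, task="image-classification")
print(onnx_config.inputs)                # OrderedDict([('pixel_values', {0: 'batch'})])
print(onnx_config.outputs)               # OrderedDict([('logits', {0: 'batch'})])
print(onnx_config.atol_for_validation)   # 1e-4
```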
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/convert_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,142 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert MobileNetV1 checkpoints from the tensorflow/models library."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ import re
21
+ from pathlib import Path
22
+
23
+ import requests
24
+ import torch
25
+ from huggingface_hub import hf_hub_download
26
+ from PIL import Image
27
+
28
+ from transformers import (
29
+ MobileNetV1Config,
30
+ MobileNetV1ForImageClassification,
31
+ MobileNetV1ImageProcessor,
32
+ load_tf_weights_in_mobilenet_v1,
33
+ )
34
+ from transformers.utils import logging
35
+
36
+
37
+ logging.set_verbosity_info()
38
+ logger = logging.get_logger(__name__)
39
+
40
+
41
+ def get_mobilenet_v1_config(model_name):
42
+ config = MobileNetV1Config(layer_norm_eps=0.001)
43
+
44
+ if "_quant" in model_name:
45
+ raise ValueError("Quantized models are not supported.")
46
+
47
+ matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
48
+ if matches:
49
+ config.depth_multiplier = float(matches[1])
50
+ config.image_size = int(matches[2])
51
+
52
+ # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
53
+ # the usual 1000. The first class (index 0) is "background".
54
+ config.num_labels = 1001
55
+ filename = "imagenet-1k-id2label.json"
56
+ repo_id = "huggingface/label-files"
57
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
58
+ id2label = {int(k) + 1: v for k, v in id2label.items()}
59
+ id2label[0] = "background"
60
+ config.id2label = id2label
61
+ config.label2id = {v: k for k, v in id2label.items()}
62
+
63
+ return config
64
+
65
+
66
+ # We will verify our results on an image of cute cats
67
+ def prepare_img():
68
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
69
+ im = Image.open(requests.get(url, stream=True).raw)
70
+ return im
71
+
72
+
73
+ @torch.no_grad()
74
+ def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
75
+ """
76
+ Copy/paste/tweak model's weights to our MobileNetV1 structure.
77
+ """
78
+ config = get_mobilenet_v1_config(model_name)
79
+
80
+ # Load 🤗 model
81
+ model = MobileNetV1ForImageClassification(config).eval()
82
+
83
+ # Load weights from TensorFlow checkpoint
84
+ load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)
85
+
86
+ # Check outputs on an image, prepared by MobileNetV1ImageProcessor
87
+ image_processor = MobileNetV1ImageProcessor(
88
+ crop_size={"width": config.image_size, "height": config.image_size},
89
+ size={"shortest_edge": config.image_size + 32},
90
+ )
91
+ encoding = image_processor(images=prepare_img(), return_tensors="pt")
92
+ outputs = model(**encoding)
93
+ logits = outputs.logits
94
+
95
+ assert logits.shape == (1, 1001)
96
+
97
+ if model_name == "mobilenet_v1_1.0_224":
98
+ expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
99
+ elif model_name == "mobilenet_v1_0.75_192":
100
+ expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
101
+ else:
102
+ expected_logits = None
103
+
104
+ if expected_logits is not None:
105
+ assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
106
+
107
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
108
+ print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
109
+ model.save_pretrained(pytorch_dump_folder_path)
110
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
111
+ image_processor.save_pretrained(pytorch_dump_folder_path)
112
+
113
+ if push_to_hub:
114
+ print("Pushing to the hub...")
115
+ repo_id = "google/" + model_name
116
+ image_processor.push_to_hub(repo_id)
117
+ model.push_to_hub(repo_id)
118
+
119
+
120
+ if __name__ == "__main__":
121
+ parser = argparse.ArgumentParser()
122
+ # Required parameters
123
+ parser.add_argument(
124
+ "--model_name",
125
+ default="mobilenet_v1_1.0_224",
126
+ type=str,
127
+ help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
128
+ )
129
+ parser.add_argument(
130
+ "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
131
+ )
132
+ parser.add_argument(
133
+ "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
134
+ )
135
+ parser.add_argument(
136
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
137
+ )
138
+
139
+ args = parser.parse_args()
140
+ convert_movilevit_checkpoint(
141
+ args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
142
+ )
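A hedged sketch of invoking the converter above programmatically rather than via the CLI; the checkpoint path is a placeholder, and a real TensorFlow MobileNetV1 checkpoint must exist on disk for this to run (the function name, including its spelling, is taken verbatim from the file above).

```python
# Hedged sketch: programmatic use of the conversion entry point (placeholder paths).
from transformers.models.mobilenet_v1.convert_original_tf_checkpoint_to_pytorch import (
    convert_movilevit_checkpoint,
)

convert_movilevit_checkpoint(
    model_name="mobilenet_v1_1.0_224",
    checkpoint_path="/path/to/mobilenet_v1_1.0_224.ckpt",   # placeholder path
    pytorch_dump_folder_path="./mobilenet_v1_1.0_224-pt",
    push_to_hub=False,
)
```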
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/feature_extraction_mobilenet_v1.py ADDED
@@ -0,0 +1,33 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for MobileNetV1."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_mobilenet_v1 import MobileNetV1ImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class MobileNetV1FeatureExtractor(MobileNetV1ImageProcessor):
27
+ def __init__(self, *args, **kwargs) -> None:
28
+ warnings.warn(
29
+ "The class MobileNetV1FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
30
+ " Please use MobileNetV1ImageProcessor instead.",
31
+ FutureWarning,
32
+ )
33
+ super().__init__(*args, **kwargs)
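A hedged sketch confirming the behavior of the deprecated alias above: it still constructs a working image processor but emits a `FutureWarning` pointing at `MobileNetV1ImageProcessor` (vision extras assumed installed).

```python
# Hedged sketch: the deprecated feature extractor still works but warns on construction.
import warnings
from transformers import MobileNetV1FeatureExtractor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = MobileNetV1FeatureExtractor()
print(isinstance(extractor, MobileNetV1FeatureExtractor), len(caught) >= 1)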
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py ADDED
@@ -0,0 +1,326 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for MobileNetV1."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import (
23
+ get_resize_output_image_size,
24
+ resize,
25
+ to_channel_dimension_format,
26
+ )
27
+ from ...image_utils import (
28
+ IMAGENET_STANDARD_MEAN,
29
+ IMAGENET_STANDARD_STD,
30
+ ChannelDimension,
31
+ ImageInput,
32
+ PILImageResampling,
33
+ infer_channel_dimension_format,
34
+ is_scaled_image,
35
+ make_list_of_images,
36
+ to_numpy_array,
37
+ valid_images,
38
+ validate_kwargs,
39
+ validate_preprocess_arguments,
40
+ )
41
+ from ...utils import TensorType, logging
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+
47
+ class MobileNetV1ImageProcessor(BaseImageProcessor):
48
+ r"""
49
+ Constructs a MobileNetV1 image processor.
50
+
51
+ Args:
52
+ do_resize (`bool`, *optional*, defaults to `True`):
53
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
54
+ `do_resize` in the `preprocess` method.
55
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 256}`):
56
+ Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
57
+ the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
58
+ method.
59
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
60
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
61
+ `preprocess` method.
62
+ do_center_crop (`bool`, *optional*, defaults to `True`):
63
+ Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
64
+ is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in the
65
+ `preprocess` method.
66
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
67
+ Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
68
+ Can be overridden by the `crop_size` parameter in the `preprocess` method.
69
+ do_rescale (`bool`, *optional*, defaults to `True`):
70
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
71
+ parameter in the `preprocess` method.
72
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
73
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
74
+ `preprocess` method.
75
+ do_normalize (`bool`, *optional*, defaults to `True`):
76
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
77
+ method.
78
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
79
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
80
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
81
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
82
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
83
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
84
+ """
85
+
86
+ model_input_names = ["pixel_values"]
87
+
88
+ def __init__(
89
+ self,
90
+ do_resize: bool = True,
91
+ size: Optional[Dict[str, int]] = None,
92
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
93
+ do_center_crop: bool = True,
94
+ crop_size: Dict[str, int] = None,
95
+ do_rescale: bool = True,
96
+ rescale_factor: Union[int, float] = 1 / 255,
97
+ do_normalize: bool = True,
98
+ image_mean: Optional[Union[float, List[float]]] = None,
99
+ image_std: Optional[Union[float, List[float]]] = None,
100
+ **kwargs,
101
+ ) -> None:
102
+ super().__init__(**kwargs)
103
+ size = size if size is not None else {"shortest_edge": 256}
104
+ size = get_size_dict(size, default_to_square=False)
105
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
106
+ crop_size = get_size_dict(crop_size)
107
+ self.do_resize = do_resize
108
+ self.size = size
109
+ self.resample = resample
110
+ self.do_center_crop = do_center_crop
111
+ self.crop_size = crop_size
112
+ self.do_rescale = do_rescale
113
+ self.rescale_factor = rescale_factor
114
+ self.do_normalize = do_normalize
115
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
116
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
117
+ self._valid_processor_keys = [
118
+ "images",
119
+ "do_resize",
120
+ "size",
121
+ "resample",
122
+ "do_center_crop",
123
+ "crop_size",
124
+ "do_rescale",
125
+ "rescale_factor",
126
+ "do_normalize",
127
+ "image_mean",
128
+ "image_std",
129
+ "return_tensors",
130
+ "data_format",
131
+ "input_data_format",
132
+ ]
133
+
134
+ # Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize
135
+ def resize(
136
+ self,
137
+ image: np.ndarray,
138
+ size: Dict[str, int],
139
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
140
+ data_format: Optional[Union[str, ChannelDimension]] = None,
141
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
142
+ **kwargs,
143
+ ) -> np.ndarray:
144
+ """
145
+ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
146
+ resized to keep the input aspect ratio.
147
+
148
+ Args:
149
+ image (`np.ndarray`):
150
+ Image to resize.
151
+ size (`Dict[str, int]`):
152
+ Size of the output image.
153
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
154
+ Resampling filter to use when resizing the image.
155
+ data_format (`str` or `ChannelDimension`, *optional*):
156
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
157
+ input_data_format (`ChannelDimension` or `str`, *optional*):
158
+ The channel dimension format of the input image. If not provided, it will be inferred.
159
+ """
160
+ default_to_square = True
161
+ if "shortest_edge" in size:
162
+ size = size["shortest_edge"]
163
+ default_to_square = False
164
+ elif "height" in size and "width" in size:
165
+ size = (size["height"], size["width"])
166
+ else:
167
+ raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
168
+
169
+ output_size = get_resize_output_image_size(
170
+ image,
171
+ size=size,
172
+ default_to_square=default_to_square,
173
+ input_data_format=input_data_format,
174
+ )
175
+ return resize(
176
+ image,
177
+ size=output_size,
178
+ resample=resample,
179
+ data_format=data_format,
180
+ input_data_format=input_data_format,
181
+ **kwargs,
182
+ )
183
+
184
+ def preprocess(
185
+ self,
186
+ images: ImageInput,
187
+ do_resize: Optional[bool] = None,
188
+ size: Dict[str, int] = None,
189
+ resample: PILImageResampling = None,
190
+ do_center_crop: bool = None,
191
+ crop_size: Dict[str, int] = None,
192
+ do_rescale: Optional[bool] = None,
193
+ rescale_factor: Optional[float] = None,
194
+ do_normalize: Optional[bool] = None,
195
+ image_mean: Optional[Union[float, List[float]]] = None,
196
+ image_std: Optional[Union[float, List[float]]] = None,
197
+ return_tensors: Optional[Union[str, TensorType]] = None,
198
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
199
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
200
+ **kwargs,
201
+ ):
202
+ """
203
+ Preprocess an image or batch of images.
204
+
205
+ Args:
206
+ images (`ImageInput`):
207
+ Image to preprocess. Expects a single image or a batch of images with pixel values ranging from 0 to 255. If
208
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
209
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
210
+ Whether to resize the image.
211
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
212
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
213
+ the longest edge resized to keep the input aspect ratio.
214
+ resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
215
+ `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
216
+ an effect if `do_resize` is set to `True`.
217
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
218
+ Whether to center crop the image.
219
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
220
+ Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
221
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
222
+ Whether to rescale the image values to the range [0, 1].
223
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
224
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
225
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
226
+ Whether to normalize the image.
227
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
228
+ Image mean to use if `do_normalize` is set to `True`.
229
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
230
+ Image standard deviation to use if `do_normalize` is set to `True`.
231
+ return_tensors (`str` or `TensorType`, *optional*):
232
+ The type of tensors to return. Can be one of:
233
+ - Unset: Return a list of `np.ndarray`.
234
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
235
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
236
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
237
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
238
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
239
+ The channel dimension format for the output image. Can be one of:
240
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
241
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
242
+ - Unset: Use the channel dimension format of the input image.
243
+ input_data_format (`ChannelDimension` or `str`, *optional*):
244
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
245
+ from the input image. Can be one of:
246
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
247
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
248
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
249
+ """
250
+ do_resize = do_resize if do_resize is not None else self.do_resize
251
+ size = size if size is not None else self.size
252
+ size = get_size_dict(size, default_to_square=False)
253
+ resample = resample if resample is not None else self.resample
254
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
255
+ crop_size = crop_size if crop_size is not None else self.crop_size
256
+ crop_size = get_size_dict(crop_size)
257
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
258
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
259
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
260
+ image_mean = image_mean if image_mean is not None else self.image_mean
261
+ image_std = image_std if image_std is not None else self.image_std
262
+
263
+ images = make_list_of_images(images)
264
+
265
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
266
+
267
+ if not valid_images(images):
268
+ raise ValueError(
269
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
270
+ "torch.Tensor, tf.Tensor or jax.ndarray."
271
+ )
272
+ validate_preprocess_arguments(
273
+ do_rescale=do_rescale,
274
+ rescale_factor=rescale_factor,
275
+ do_normalize=do_normalize,
276
+ image_mean=image_mean,
277
+ image_std=image_std,
278
+ do_center_crop=do_center_crop,
279
+ crop_size=crop_size,
280
+ do_resize=do_resize,
281
+ size=size,
282
+ resample=resample,
283
+ )
284
+
285
+ # All transformations expect numpy arrays.
286
+ images = [to_numpy_array(image) for image in images]
287
+
288
+ if is_scaled_image(images[0]) and do_rescale:
289
+ logger.warning_once(
290
+ "It looks like you are trying to rescale already rescaled images. If the input"
291
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
292
+ )
293
+
294
+ if input_data_format is None:
295
+ # We assume that all images have the same channel dimension format.
296
+ input_data_format = infer_channel_dimension_format(images[0])
297
+
298
+ if do_resize:
299
+ images = [
300
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
301
+ for image in images
302
+ ]
303
+
304
+ if do_center_crop:
305
+ images = [
306
+ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
307
+ ]
308
+
309
+ if do_rescale:
310
+ images = [
311
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
312
+ for image in images
313
+ ]
314
+
315
+ if do_normalize:
316
+ images = [
317
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
318
+ for image in images
319
+ ]
320
+
321
+ images = [
322
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
323
+ ]
324
+
325
+ data = {"pixel_values": images}
326
+ return BatchFeature(data=data, tensor_type=return_tensors)
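As a hedged end-to-end sketch of the pipeline implemented above (resize to the shortest edge, center crop, rescale, normalize, channels-first conversion), assuming Pillow and PyTorch are installed and `cat.png` is only a placeholder path:

```python
from PIL import Image
from transformers import MobileNetV1ImageProcessor

image = Image.open("cat.png")  # placeholder input image
image_processor = MobileNetV1ImageProcessor()  # defaults: shortest_edge=256, 224x224 crop, ImageNet mean/std

# __call__ forwards to preprocess(); the result is a BatchFeature holding channels-first pixel values
inputs = image_processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```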
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py ADDED
@@ -0,0 +1,482 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Apple Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch MobileNetV1 model."""
16
+
17
+
18
+ from typing import Optional, Union
19
+
20
+ import torch
21
+ from torch import nn
22
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
23
+
24
+ from ...activations import ACT2FN
25
+ from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
26
+ from ...modeling_utils import PreTrainedModel
27
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
28
+ from .configuration_mobilenet_v1 import MobileNetV1Config
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+
34
+ # General docstring
35
+ _CONFIG_FOR_DOC = "MobileNetV1Config"
36
+
37
+ # Base docstring
38
+ _CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
39
+ _EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]
40
+
41
+ # Image classification docstring
42
+ _IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
43
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
44
+
45
+
46
+ from ..deprecated._archive_maps import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
47
+
48
+
49
+ def _build_tf_to_pytorch_map(model, config, tf_weights=None):
50
+ """
51
+ A map of modules from TF to PyTorch.
52
+ """
53
+
54
+ tf_to_pt_map = {}
55
+
56
+ if isinstance(model, MobileNetV1ForImageClassification):
57
+ backbone = model.mobilenet_v1
58
+ else:
59
+ backbone = model
60
+
61
+ prefix = "MobilenetV1/Conv2d_0/"
62
+ tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
63
+ tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
64
+ tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
65
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
66
+ tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var
67
+
68
+ for i in range(13):
69
+ tf_index = i + 1
70
+ pt_index = i * 2
71
+
72
+ pointer = backbone.layer[pt_index]
73
+ prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
74
+ tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
75
+ tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
76
+ tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
77
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
78
+ tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
79
+
80
+ pointer = backbone.layer[pt_index + 1]
81
+ prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
82
+ tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
83
+ tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
84
+ tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
85
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
86
+ tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
87
+
88
+ if isinstance(model, MobileNetV1ForImageClassification):
89
+ prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
90
+ tf_to_pt_map[prefix + "weights"] = model.classifier.weight
91
+ tf_to_pt_map[prefix + "biases"] = model.classifier.bias
92
+
93
+ return tf_to_pt_map
94
+
95
+
96
+ def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
97
+ """Load TensorFlow checkpoints in a PyTorch model."""
98
+ try:
99
+ import numpy as np
100
+ import tensorflow as tf
101
+ except ImportError:
102
+ logger.error(
103
+ "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
104
+ "https://www.tensorflow.org/install/ for installation instructions."
105
+ )
106
+ raise
107
+
108
+ # Load weights from TF model
109
+ init_vars = tf.train.list_variables(tf_checkpoint_path)
110
+ tf_weights = {}
111
+ for name, shape in init_vars:
112
+ logger.info(f"Loading TF weight {name} with shape {shape}")
113
+ array = tf.train.load_variable(tf_checkpoint_path, name)
114
+ tf_weights[name] = array
115
+
116
+ # Build TF to PyTorch weights loading map
117
+ tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)
118
+
119
+ for name, pointer in tf_to_pt_map.items():
120
+ logger.info(f"Importing {name}")
121
+ if name not in tf_weights:
122
+ logger.info(f"{name} not in tf pre-trained weights, skipping")
123
+ continue
124
+
125
+ array = tf_weights[name]
126
+
127
+ if "depthwise_weights" in name:
128
+ logger.info("Transposing depthwise")
129
+ array = np.transpose(array, (2, 3, 0, 1))
130
+ elif "weights" in name:
131
+ logger.info("Transposing")
132
+ if len(pointer.shape) == 2: # copying into linear layer
133
+ array = array.squeeze().transpose()
134
+ else:
135
+ array = np.transpose(array, (3, 2, 0, 1))
136
+
137
+ if pointer.shape != array.shape:
138
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
139
+
140
+ logger.info(f"Initialize PyTorch weight {name} {array.shape}")
141
+ pointer.data = torch.from_numpy(array)
142
+
143
+ tf_weights.pop(name, None)
144
+ tf_weights.pop(name + "/RMSProp", None)
145
+ tf_weights.pop(name + "/RMSProp_1", None)
146
+ tf_weights.pop(name + "/ExponentialMovingAverage", None)
147
+
148
+ logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
149
+ return model
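+ # Hypothetical usage from a conversion script, assuming `config` matches the TF checkpoint
+ # (e.g. num_labels=1001 for the ImageNet checkpoints) and the path below is only a placeholder:
+ #     model = MobileNetV1ForImageClassification(config)
+ #     load_tf_weights_in_mobilenet_v1(model, config, "/path/to/mobilenet_v1.ckpt")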
150
+
151
+
152
+ def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
153
+ """
154
+ Apply TensorFlow-style "SAME" padding to a convolution layer. See the notes at:
155
+ https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2
156
+ """
157
+ in_height, in_width = features.shape[-2:]
158
+ stride_height, stride_width = conv_layer.stride
159
+ kernel_height, kernel_width = conv_layer.kernel_size
160
+
161
+ if in_height % stride_height == 0:
162
+ pad_along_height = max(kernel_height - stride_height, 0)
163
+ else:
164
+ pad_along_height = max(kernel_height - (in_height % stride_height), 0)
165
+
166
+ if in_width % stride_width == 0:
167
+ pad_along_width = max(kernel_width - stride_width, 0)
168
+ else:
169
+ pad_along_width = max(kernel_width - (in_width % stride_width), 0)
170
+
171
+ pad_left = pad_along_width // 2
172
+ pad_right = pad_along_width - pad_left
173
+ pad_top = pad_along_height // 2
174
+ pad_bottom = pad_along_height - pad_top
175
+
176
+ padding = (pad_left, pad_right, pad_top, pad_bottom)
177
+ return nn.functional.pad(features, padding, "constant", 0.0)
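+ # Worked example (illustrative): a 224x224 input through the 3x3, stride-2 conv stem gives
+ # pad_along_height = pad_along_width = max(3 - 2, 0) = 1, i.e. padding = (0, 1, 0, 1) in
+ # (left, right, top, bottom) order, reproducing TensorFlow's asymmetric "SAME" padding.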
178
+
179
+
180
+ class MobileNetV1ConvLayer(nn.Module):
181
+ def __init__(
182
+ self,
183
+ config: MobileNetV1Config,
184
+ in_channels: int,
185
+ out_channels: int,
186
+ kernel_size: int,
187
+ stride: Optional[int] = 1,
188
+ groups: Optional[int] = 1,
189
+ bias: bool = False,
190
+ use_normalization: Optional[bool] = True,
191
+ use_activation: Optional[Union[bool, str]] = True,
192
+ ) -> None:
193
+ super().__init__()
194
+ self.config = config
195
+
196
+ if in_channels % groups != 0:
197
+ raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
198
+ if out_channels % groups != 0:
199
+ raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")
200
+
201
+ padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)
202
+
203
+ self.convolution = nn.Conv2d(
204
+ in_channels=in_channels,
205
+ out_channels=out_channels,
206
+ kernel_size=kernel_size,
207
+ stride=stride,
208
+ padding=padding,
209
+ groups=groups,
210
+ bias=bias,
211
+ padding_mode="zeros",
212
+ )
213
+
214
+ if use_normalization:
215
+ self.normalization = nn.BatchNorm2d(
216
+ num_features=out_channels,
217
+ eps=config.layer_norm_eps,
218
+ momentum=0.9997,
219
+ affine=True,
220
+ track_running_stats=True,
221
+ )
222
+ else:
223
+ self.normalization = None
224
+
225
+ if use_activation:
226
+ if isinstance(use_activation, str):
227
+ self.activation = ACT2FN[use_activation]
228
+ elif isinstance(config.hidden_act, str):
229
+ self.activation = ACT2FN[config.hidden_act]
230
+ else:
231
+ self.activation = config.hidden_act
232
+ else:
233
+ self.activation = None
234
+
235
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
236
+ if self.config.tf_padding:
237
+ features = apply_tf_padding(features, self.convolution)
238
+ features = self.convolution(features)
239
+ if self.normalization is not None:
240
+ features = self.normalization(features)
241
+ if self.activation is not None:
242
+ features = self.activation(features)
243
+ return features
244
+
245
+
246
+ class MobileNetV1PreTrainedModel(PreTrainedModel):
247
+ """
248
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
249
+ models.
250
+ """
251
+
252
+ config_class = MobileNetV1Config
253
+ load_tf_weights = load_tf_weights_in_mobilenet_v1
254
+ base_model_prefix = "mobilenet_v1"
255
+ main_input_name = "pixel_values"
256
+ supports_gradient_checkpointing = False
257
+
258
+ def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
259
+ """Initialize the weights"""
260
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
261
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
262
+ if module.bias is not None:
263
+ module.bias.data.zero_()
264
+ elif isinstance(module, nn.BatchNorm2d):
265
+ module.bias.data.zero_()
266
+ module.weight.data.fill_(1.0)
267
+
268
+
269
+ MOBILENET_V1_START_DOCSTRING = r"""
270
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
271
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
272
+ behavior.
273
+
274
+ Parameters:
275
+ config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
276
+ Initializing with a config file does not load the weights associated with the model, only the
277
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
278
+ """
279
+
280
+ MOBILENET_V1_INPUTS_DOCSTRING = r"""
281
+ Args:
282
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
283
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
284
+ [`MobileNetV1ImageProcessor.__call__`] for details.
285
+ output_hidden_states (`bool`, *optional*):
286
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
287
+ more detail.
288
+ return_dict (`bool`, *optional*):
289
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
290
+ """
291
+
292
+
293
+ @add_start_docstrings(
294
+ "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
295
+ MOBILENET_V1_START_DOCSTRING,
296
+ )
297
+ class MobileNetV1Model(MobileNetV1PreTrainedModel):
298
+ def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
299
+ super().__init__(config)
300
+ self.config = config
301
+
302
+ depth = 32
303
+ out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
304
+
305
+ self.conv_stem = MobileNetV1ConvLayer(
306
+ config,
307
+ in_channels=config.num_channels,
308
+ out_channels=out_channels,
309
+ kernel_size=3,
310
+ stride=2,
311
+ )
312
+
313
+ strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
314
+
315
+ self.layer = nn.ModuleList()
316
+ for i in range(13):
317
+ in_channels = out_channels
318
+
319
+ if strides[i] == 2 or i == 0:
320
+ depth *= 2
321
+ out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
322
+
323
+ self.layer.append(
324
+ MobileNetV1ConvLayer(
325
+ config,
326
+ in_channels=in_channels,
327
+ out_channels=in_channels,
328
+ kernel_size=3,
329
+ stride=strides[i],
330
+ groups=in_channels,
331
+ )
332
+ )
333
+
334
+ self.layer.append(
335
+ MobileNetV1ConvLayer(
336
+ config,
337
+ in_channels=in_channels,
338
+ out_channels=out_channels,
339
+ kernel_size=1,
340
+ )
341
+ )
342
+
343
+ self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None
344
+
345
+ # Initialize weights and apply final processing
346
+ self.post_init()
347
+
348
+ def _prune_heads(self, heads_to_prune):
349
+ raise NotImplementedError
350
+
351
+ @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
352
+ @add_code_sample_docstrings(
353
+ checkpoint=_CHECKPOINT_FOR_DOC,
354
+ output_type=BaseModelOutputWithPoolingAndNoAttention,
355
+ config_class=_CONFIG_FOR_DOC,
356
+ modality="vision",
357
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
358
+ )
359
+ def forward(
360
+ self,
361
+ pixel_values: Optional[torch.Tensor] = None,
362
+ output_hidden_states: Optional[bool] = None,
363
+ return_dict: Optional[bool] = None,
364
+ ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
365
+ output_hidden_states = (
366
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
367
+ )
368
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
369
+
370
+ if pixel_values is None:
371
+ raise ValueError("You have to specify pixel_values")
372
+
373
+ hidden_states = self.conv_stem(pixel_values)
374
+
375
+ all_hidden_states = () if output_hidden_states else None
376
+
377
+ for i, layer_module in enumerate(self.layer):
378
+ hidden_states = layer_module(hidden_states)
379
+
380
+ if output_hidden_states:
381
+ all_hidden_states = all_hidden_states + (hidden_states,)
382
+
383
+ last_hidden_state = hidden_states
384
+
385
+ if self.pooler is not None:
386
+ pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
387
+ else:
388
+ pooled_output = None
389
+
390
+ if not return_dict:
391
+ return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
392
+
393
+ return BaseModelOutputWithPoolingAndNoAttention(
394
+ last_hidden_state=last_hidden_state,
395
+ pooler_output=pooled_output,
396
+ hidden_states=all_hidden_states,
397
+ )
398
+
399
+
400
+ @add_start_docstrings(
401
+ """
402
+ MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
403
+ ImageNet.
404
+ """,
405
+ MOBILENET_V1_START_DOCSTRING,
406
+ )
407
+ class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
408
+ def __init__(self, config: MobileNetV1Config) -> None:
409
+ super().__init__(config)
410
+
411
+ self.num_labels = config.num_labels
412
+ self.mobilenet_v1 = MobileNetV1Model(config)
413
+
414
+ last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels
415
+
416
+ # Classifier head
417
+ self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
418
+ self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
419
+
420
+ # Initialize weights and apply final processing
421
+ self.post_init()
422
+
423
+ @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
424
+ @add_code_sample_docstrings(
425
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
426
+ output_type=ImageClassifierOutputWithNoAttention,
427
+ config_class=_CONFIG_FOR_DOC,
428
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
429
+ )
430
+ def forward(
431
+ self,
432
+ pixel_values: Optional[torch.Tensor] = None,
433
+ output_hidden_states: Optional[bool] = None,
434
+ labels: Optional[torch.Tensor] = None,
435
+ return_dict: Optional[bool] = None,
436
+ ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
437
+ r"""
438
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
439
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
440
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
441
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
442
+ """
443
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
444
+
445
+ outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
446
+
447
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
448
+
449
+ logits = self.classifier(self.dropout(pooled_output))
450
+
451
+ loss = None
452
+ if labels is not None:
453
+ if self.config.problem_type is None:
454
+ if self.num_labels == 1:
455
+ self.config.problem_type = "regression"
456
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
457
+ self.config.problem_type = "single_label_classification"
458
+ else:
459
+ self.config.problem_type = "multi_label_classification"
460
+
461
+ if self.config.problem_type == "regression":
462
+ loss_fct = MSELoss()
463
+ if self.num_labels == 1:
464
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
465
+ else:
466
+ loss = loss_fct(logits, labels)
467
+ elif self.config.problem_type == "single_label_classification":
468
+ loss_fct = CrossEntropyLoss()
469
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
470
+ elif self.config.problem_type == "multi_label_classification":
471
+ loss_fct = BCEWithLogitsLoss()
472
+ loss = loss_fct(logits, labels)
473
+
474
+ if not return_dict:
475
+ output = (logits,) + outputs[2:]
476
+ return ((loss,) + output) if loss is not None else output
477
+
478
+ return ImageClassifierOutputWithNoAttention(
479
+ loss=loss,
480
+ logits=logits,
481
+ hidden_states=outputs.hidden_states,
482
+ )
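To connect the classification head above with the preprocessing code earlier in this diff, here is a small, hedged inference sketch. The random tensor merely stands in for real processor output, and the checkpoint is the one already named by the docstring constants; the predicted label is only meaningful with real input.

```python
import torch
from transformers import MobileNetV1ForImageClassification

model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
model.eval()

pixel_values = torch.rand(1, 3, 224, 224)  # stand-in for MobileNetV1ImageProcessor output
with torch.no_grad():
    logits = model(pixel_values).logits  # shape (1, num_labels)
print(model.config.id2label[logits.argmax(-1).item()])
```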
llmeval-env/lib/python3.10/site-packages/transformers/models/mpt/__init__.py ADDED
@@ -0,0 +1,62 @@
1
+ # Copyright 2023 HuggingFace Inc. team and MosaicML NLP team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_mpt": ["MPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MptConfig", "MptOnnxConfig"],
22
+ }
23
+
24
+ try:
25
+ if not is_torch_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["modeling_mpt"] = [
31
+ "MPT_PRETRAINED_MODEL_ARCHIVE_LIST",
32
+ "MptForCausalLM",
33
+ "MptModel",
34
+ "MptPreTrainedModel",
35
+ "MptForSequenceClassification",
36
+ "MptForTokenClassification",
37
+ "MptForQuestionAnswering",
38
+ ]
39
+
40
+ if TYPE_CHECKING:
41
+ from .configuration_mpt import MPT_PRETRAINED_CONFIG_ARCHIVE_MAP, MptConfig, MptOnnxConfig
42
+
43
+ try:
44
+ if not is_torch_available():
45
+ raise OptionalDependencyNotAvailable()
46
+ except OptionalDependencyNotAvailable:
47
+ pass
48
+ else:
49
+ from .modeling_mpt import (
50
+ MPT_PRETRAINED_MODEL_ARCHIVE_LIST,
51
+ MptForCausalLM,
52
+ MptForQuestionAnswering,
53
+ MptForSequenceClassification,
54
+ MptForTokenClassification,
55
+ MptModel,
56
+ MptPreTrainedModel,
57
+ )
58
+
59
+ else:
60
+ import sys
61
+
62
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
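The `_LazyModule` indirection above keeps `import transformers.models.mpt` cheap; the torch-backed classes are only resolved on first attribute access. A brief, hedged illustration:

```python
import transformers.models.mpt as mpt

config_cls = mpt.MptConfig      # resolved lazily from configuration_mpt
model_cls = mpt.MptForCausalLM  # resolved lazily from modeling_mpt; requires torch to be installed
print(config_cls.model_type)    # "mpt"
```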
llmeval-env/lib/python3.10/site-packages/transformers/models/mpt/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.05 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mpt/__pycache__/configuration_mpt.cpython-310.pyc ADDED
Binary file (10.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mpt/__pycache__/modeling_mpt.cpython-310.pyc ADDED
Binary file (27.3 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mpt/configuration_mpt.py ADDED
@@ -0,0 +1,246 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 HuggingFace Inc. team and MosaicML NLP team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Mpt configuration"""
16
+ from typing import TYPE_CHECKING, Optional, Union
17
+
18
+
19
+ if TYPE_CHECKING:
20
+ pass
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ from ..deprecated._archive_maps import MPT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
30
+
31
+
32
+ class MptAttentionConfig(PretrainedConfig):
33
+ """
34
+ This is the configuration class to store the configuration of a [`MptAttention`] class. It is used to instantiate
35
+ attention layers according to the specified arguments, defining the layers architecture. Instantiating a
36
+ configuration with the defaults will yield a similar configuration to that of the MPT
37
+ [mosaicml/mpt-7b](https://huggingface.co/mosaicml/mpt-7b) architecture. Most of the arguments are kept for backward
38
+ compatibility with previous MPT models that are hosted on the Hub (previously with `trust_remote_code=True`).
39
+
40
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
41
+ documentation from [`PretrainedConfig`] for more information.
42
+
43
+ Args:
44
+ attn_type (`str`, *optional*, defaults to `"multihead_attention"`):
45
+ type of attention to use. Options: `"multihead_attention"`, `"multiquery_attention"`.
46
+ attn_pdrop (`float`, *optional*, defaults to 0.0):
47
+ The dropout probability for the attention layers.
48
+ attn_impl (`str`, *optional*, defaults to `"torch"`):
49
+ The attention implementation to use. One of `"torch"`, `"flash"`, or `"triton"`.
50
+ clip_qkv (`float`, *optional*):
51
+ If not `None`, clip the queries, keys, and values in the attention layer to this value.
52
+ softmax_scale (`float`, *optional*, defaults to `None`):
53
+ If not `None`, scale the softmax in the attention layer by this value. If `None`, will default to
54
+ `1/sqrt(hidden_size)`.
55
+ prefix_lm (`bool`, *optional*, defaults to `False`):
56
+ Whether the model should operate as a Prefix LM. This requires passing an extra `prefix_mask` argument
57
+ which indicates which tokens belong to the prefix. Tokens in the prefix can attend to one another
58
+ bi-directionally. Tokens outside the prefix use causal attention.
59
+ qk_ln (`bool`, *optional*, defaults to `False`):
60
+ Whether to apply layer normalization to the queries and keys in the attention layer.
61
+ attn_uses_sequence_id (`bool`, *optional*, defaults to `False`):
62
+ Whether to restrict attention to tokens that have the same token_type_ids. When the model is in `train`
63
+ mode, this requires passing an extra *token_type_ids* argument which indicates which sub-sequence each
64
+ token belongs to. Defaults to `False` meaning any provided *token_type_ids* will be ignored.
65
+ alibi (`bool`, *optional*, defaults to `True`):
66
+ Whether or not to use the alibi bias instead of positional embedding.
67
+ alibi_bias_max (`int`, *optional*, defaults to 8):
68
+ The maximum value of the alibi bias.
69
+ """
70
+
71
+ def __init__(
72
+ self,
73
+ attn_type="multihead_attention",
74
+ attn_pdrop=0,
75
+ attn_impl="torch",
76
+ clip_qkv=None,
77
+ softmax_scale=None,
78
+ prefix_lm=False,
79
+ qk_ln=False,
80
+ attn_uses_sequence_id=False,
81
+ alibi=True,
82
+ alibi_bias_max=8,
83
+ **kwargs,
84
+ ):
85
+ super().__init__()
86
+ self.attn_type = attn_type
87
+ self.attn_pdrop = attn_pdrop
88
+ self.attn_impl = attn_impl
89
+ self.clip_qkv = clip_qkv
90
+ self.softmax_scale = softmax_scale
91
+ self.prefix_lm = prefix_lm
92
+ self.attn_uses_sequence_id = attn_uses_sequence_id
93
+ self.alibi = alibi
94
+ self.qk_ln = qk_ln
95
+ self.alibi_bias_max = alibi_bias_max
96
+
97
+ if attn_type not in ["multihead_attention", "multiquery_attention"]:
98
+ raise ValueError(
99
+ f"`attn_type` has to be either `multihead_attention` or `multiquery_attention`. Received: {attn_type}"
100
+ )
101
+
102
+ @classmethod
103
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
104
+ cls._set_token_in_kwargs(kwargs)
105
+
106
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
107
+
108
+ if config_dict.get("model_type") == "mpt":
109
+ config_dict = config_dict["attn_config"]
110
+
111
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
112
+ logger.warning(
113
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
114
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
115
+ )
116
+
117
+ return cls.from_dict(config_dict, **kwargs)
118
+
119
+
120
+ class MptConfig(PretrainedConfig):
121
+ """
122
+ This is the configuration class to store the configuration of a [`MptModel`]. It is used to instantiate a Mpt model
123
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
124
+ defaults will yield a similar configuration to the Mpt-7b architecture
125
+ [mosaicml/mpt-7b](https://huggingface.co/mosaicml/mpt-7b).
126
+
127
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
128
+ documentation from [`PretrainedConfig`] for more information.
129
+
130
+
131
+ Args:
132
+ d_model (`int`, *optional*, defaults to 2048):
133
+ Dimensionality of the embeddings and hidden states.
134
+ n_heads (`int`, *optional*, defaults to 16):
135
+ Number of attention heads for each attention layer in the Transformer encoder.
136
+ n_layers (`int`, *optional*, defaults to 24):
137
+ Number of hidden layers in the Transformer encoder.
138
+ expansion_ratio (`int`, *optional*, defaults to 4):
139
+ The ratio of the up/down scale in the MLP.
140
+ max_seq_len (`int`, *optional*, defaults to 2048):
141
+ The maximum sequence length of the model.
142
+ vocab_size (`int`, *optional*, defaults to 50368):
143
+ the `input_ids` passed when calling [`MptModel`]. Check [this
144
+ the `inputs_ids` passed when calling [`MptModel`]. Check [this
145
+ discussion](https://huggingface.co/bigscience/mpt/discussions/120#633d28389addb8530b406c2a) on how the
146
+ `vocab_size` has been defined.
147
+ resid_pdrop (`float`, *optional*, defaults to 0.0):
148
+ The dropout probability applied to the attention output before combining with residual.
149
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
150
+ The epsilon to use in the layer normalization layers.
151
+ emb_pdrop (`float`, *optional*, defaults to 0.0):
152
+ The dropout probability for the embedding layer.
153
+ learned_pos_emb (`bool`, *optional*, defaults to `True`):
154
+ Whether to use learned positional embeddings.
155
+ attn_config (`dict`, *optional*):
156
+ A dictionary used to configure the model's attention module.
157
+ init_device (`str`, *optional*, defaults to `"cpu"`):
158
+ The device to use for parameter initialization. Defined for backward compatibility.
159
+ logit_scale (`float`, *optional*):
160
+ If not None, scale the logits by this value.
161
+ no_bias (`bool`, *optional*, defaults to `True`):
162
+ Whether to use bias in all linear layers.
163
+ verbose (`int`, *optional*, defaults to 0):
164
+ The verbosity level to use for logging. Used in the previous versions of MPT models for logging. This
165
+ argument is deprecated.
166
+ embedding_fraction (`float`, *optional*, defaults to 1.0):
167
+ The fraction to scale the gradients of the embedding layer by.
168
+ norm_type (`str`, *optional*, defaults to `"low_precision_layernorm"`):
169
+ Type of layer norm to use. All MPT models use the same layer norm implementation. Defined for backward
170
+ compatibility.
171
+ use_cache (`bool`, *optional*, defaults to `False`):
172
+ Whether or not the model should return the last key/values attentions (not used by all models).
173
+ initializer_range (`float`, *optional*, defaults to 0.02):
174
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
175
+
176
+ Example:
177
+
178
+ ```python
179
+ >>> from transformers import MptConfig, MptModel
180
+
181
+ >>> # Initializing a Mpt configuration
182
+ >>> configuration = MptConfig()
183
+
184
+ >>> # Initializing a model (with random weights) from the configuration
185
+ >>> model = MptModel(configuration)
186
+
187
+ >>> # Accessing the model configuration
188
+ >>> configuration = model.config
189
+ ```
190
+ """
191
+
192
+ model_type = "mpt"
193
+ attribute_map = {
194
+ "num_attention_heads": "n_heads",
195
+ "hidden_size": "d_model",
196
+ "num_hidden_layers": "n_layers",
197
+ }
198
+
199
+ def __init__(
200
+ self,
201
+ d_model: int = 2048,
202
+ n_heads: int = 16,
203
+ n_layers: int = 24,
204
+ expansion_ratio: int = 4,
205
+ max_seq_len: int = 2048,
206
+ vocab_size: int = 50368,
207
+ resid_pdrop: float = 0.0,
208
+ layer_norm_epsilon: float = 1e-5,
209
+ emb_pdrop: float = 0.0,
210
+ learned_pos_emb: bool = True,
211
+ attn_config: MptAttentionConfig = None,
212
+ init_device: str = "cpu",
213
+ logit_scale: Optional[Union[float, str]] = None,
214
+ no_bias: bool = True,
215
+ verbose: int = 0,
216
+ embedding_fraction: float = 1.0,
217
+ norm_type: str = "low_precision_layernorm",
218
+ use_cache: bool = False,
219
+ initializer_range=0.02,
220
+ **kwargs,
221
+ ):
222
+ if attn_config is None:
223
+ self.attn_config = MptAttentionConfig()
224
+ elif isinstance(attn_config, dict):
225
+ self.attn_config = MptAttentionConfig(**attn_config)
226
+ else:
227
+ self.attn_config = attn_config
228
+ self.d_model = d_model
229
+ self.n_heads = n_heads
230
+ self.n_layers = n_layers
231
+ self.expansion_ratio = expansion_ratio
232
+ self.max_seq_len = max_seq_len
233
+ self.vocab_size = vocab_size
234
+ self.resid_pdrop = resid_pdrop
235
+ self.emb_pdrop = emb_pdrop
236
+ self.learned_pos_emb = learned_pos_emb
237
+ self.init_device = init_device
238
+ self.logit_scale = logit_scale
239
+ self.no_bias = no_bias
240
+ self.verbose = verbose
241
+ self.embedding_fraction = embedding_fraction
242
+ self.norm_type = norm_type
243
+ self.layer_norm_epsilon = layer_norm_epsilon
244
+ self.use_cache = use_cache
245
+ self.initializer_range = initializer_range
246
+ super().__init__(**kwargs)
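Beyond the docstring example above, the nested `attn_config` can also be passed as a plain dict, which the constructor promotes to an `MptAttentionConfig`. The values below are illustrative only, not recommended settings.

```python
from transformers import MptConfig

config = MptConfig(
    d_model=512,
    n_heads=8,
    n_layers=2,
    attn_config={"attn_pdrop": 0.1, "clip_qkv": 8.0, "alibi": True},
)
print(type(config.attn_config).__name__)  # MptAttentionConfig
print(config.attn_config.clip_qkv)        # 8.0
```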
llmeval-env/lib/python3.10/site-packages/transformers/models/mpt/modeling_mpt.py ADDED
@@ -0,0 +1,942 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 HuggingFace Inc. team and MosaicML NLP team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch MPT model."""
16
+
17
+ import math
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
24
+ from torch.nn import functional as F
25
+
26
+ from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
27
+ from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
28
+ from ...modeling_outputs import (
29
+ BaseModelOutputWithPastAndCrossAttentions,
30
+ CausalLMOutputWithCrossAttentions,
31
+ QuestionAnsweringModelOutput,
32
+ SequenceClassifierOutputWithPast,
33
+ TokenClassifierOutput,
34
+ )
35
+ from ...modeling_utils import PreTrainedModel
36
+ from ...utils import logging
37
+ from .configuration_mpt import MptConfig
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ _CHECKPOINT_FOR_DOC = "mosaicml/mpt-7b"
43
+ _CONFIG_FOR_DOC = "MptConfig"
44
+
45
+
46
+ from ..deprecated._archive_maps import MPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
47
+
48
+
49
+ def build_mpt_alibi_tensor(num_heads, sequence_length, alibi_bias_max=8, device=None):
50
+ r"""
51
+ Link to paper: https://arxiv.org/abs/2108.12409 - unlike in the original paper, the ALiBi tensor here is not
53
+ causal; it relies on the translation invariance of softmax for a quick implementation. This implementation was
54
+ copied from the ALiBi code in the MPT source, which gives slightly different results than the Bloom ALiBi:
55
+ https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L292
55
+ """
56
+ alibi = torch.arange(1 - sequence_length, 1, dtype=torch.int32, device=device).view(1, 1, 1, sequence_length)
57
+ num_heads_power_of_2 = 2 ** math.ceil(math.log2(num_heads))
58
+
59
+ base = torch.arange(1, num_heads_power_of_2 + 1, dtype=torch.int64, device=device).float()
60
+ base = base * (alibi_bias_max / num_heads_power_of_2)
61
+
62
+ slopes = 1.0 / torch.pow(2, base)
63
+ slopes = slopes.view(1, num_heads_power_of_2, 1, 1)
64
+
65
+ if num_heads_power_of_2 != num_heads:
66
+ slopes = torch.concat([slopes[:, 1::2, ...], slopes[:, ::2, ...]], dim=1)[:, :num_heads, ...]
67
+
68
+ alibi = alibi * slopes
69
+ return alibi.squeeze(0)
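+ # Worked example (illustrative, using the MptConfig defaults n_heads=16, alibi_bias_max=8):
+ # num_heads_power_of_2 = 16, base = [0.5, 1.0, ..., 8.0], slopes = 2**(-base) = [~0.707, 0.5, ..., 2**-8];
+ # the returned tensor has shape (num_heads, 1, sequence_length), with bias 0 at the most recent position
+ # and increasingly negative values (scaled per head) for older positions.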
70
+
71
+
72
+ class MptAttention(nn.Module):
73
+ """Multi-head self attention.
74
+ Using the torch or triton attention implementation enables the user to also use additive bias.
75
+ """
76
+
77
+ def __init__(self, config: MptConfig):
78
+ super().__init__()
79
+ self.hidden_size = config.hidden_size
80
+ self.n_heads = config.n_heads
81
+ self.max_seq_length = config.max_seq_len
82
+ self.head_dim = self.hidden_size // self.n_heads
83
+ self.softmax_scale = config.attn_config.softmax_scale
84
+ if self.softmax_scale is None:
85
+ self.softmax_scale = 1 / math.sqrt(self.hidden_size / self.n_heads)
86
+
87
+ self.attn_dropout_p = config.attn_config.attn_pdrop
88
+ self.Wqkv = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False)
89
+ self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
90
+
91
+ def forward(
92
+ self,
93
+ hidden_states: torch.Tensor,
94
+ position_bias: torch.Tensor,
95
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
96
+ attention_mask: Optional[torch.Tensor] = None,
97
+ ):
98
+ batch_size, seq_length = hidden_states.shape[:2]
99
+
100
+ mixed_qkv = self.Wqkv(hidden_states)
101
+ query_states, key_states, value_states = mixed_qkv.chunk(3, dim=2)
102
+ query_states = query_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2)
103
+ key_states = key_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2)
104
+ value_states = value_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2)
105
+
106
+ if past_key_value is not None:
107
+ if len(past_key_value) != 0:
108
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
109
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
110
+ past_key_value = (key_states, value_states)
111
+ else:
112
+ past_key_value = (key_states, value_states)
113
+
114
+ attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) * self.softmax_scale
115
+
116
+ query_length = seq_length if past_key_value is None else seq_length + past_key_value[0].shape[2]
117
+
118
+ if position_bias is not None:
119
+ if len(position_bias.shape) != 3:
120
+ raise ValueError(f"Expecting position_bias shape to be 3 dimensions, got {len(position_bias.shape)}")
121
+ key_length = key_states.shape[-2]
122
+
123
+ position_bias_query_index = max(0, position_bias.size(1) - query_length)
124
+ position_bias_key_index = max(0, position_bias.size(2) - key_length)
125
+
126
+ position_bias = position_bias[:, position_bias_query_index:, position_bias_key_index:]
127
+
128
+ attention_scores = attention_scores + position_bias
129
+
130
+ if attention_mask is not None:
131
+ attention_scores = attention_scores.masked_fill(attention_mask, torch.finfo(query_states.dtype).min)
132
+
133
+ # (batch_size, n_heads, seq_length, key_length)
134
+ attn_weights = nn.functional.softmax(attention_scores.float(), dim=-1).to(value_states.dtype)
135
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attn_dropout_p, training=self.training)
136
+
137
+ context_states = torch.matmul(attn_weights, value_states)
138
+ context_states = context_states.permute(0, 2, 1, 3).contiguous().view(batch_size, seq_length, -1)
139
+ attn_output = self.out_proj(context_states)
140
+
141
+ return attn_output, attn_weights, past_key_value
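+ # Shape walk-through (illustrative): hidden_states [batch, seq, hidden] -> Wqkv -> [batch, seq, 3*hidden]
+ # -> chunk into three [batch, seq, hidden] -> reshape/transpose to [batch, n_heads, seq, head_dim];
+ # attention_scores and attn_weights are [batch, n_heads, seq, key_length], and the cached key/value
+ # states grow along the key_length dimension as generation proceeds.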
142
+
143
+
144
+ class MptMLP(nn.Module):
145
+ def __init__(self, config: MptConfig):
146
+ super().__init__()
147
+ hidden_size = config.hidden_size
148
+
149
+ self.up_proj = nn.Linear(hidden_size, 4 * hidden_size, bias=False)
150
+ self.act = nn.GELU(approximate="none")
151
+ self.down_proj = nn.Linear(4 * hidden_size, hidden_size, bias=False)
152
+ self.hidden_dropout = config.attn_config.attn_pdrop
153
+
154
+ def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
155
+ hidden_states = self.act(self.up_proj(hidden_states))
156
+
157
+ intermediate_output = self.down_proj(hidden_states)
158
+
159
+ output = F.dropout(intermediate_output, p=self.hidden_dropout, training=self.training)
160
+ output = output + residual
161
+
162
+ return output
163
+
164
+
165
+ class MptBlock(nn.Module):
166
+ def __init__(self, config: MptConfig):
167
+ super().__init__()
168
+ hidden_size = config.hidden_size
169
+
170
+ self.norm_1 = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
171
+ # backward compatibility with weights on the Hub
172
+ self.norm_1.bias = None
173
+
174
+ self.num_heads = config.n_heads
175
+ self.attn = MptAttention(config)
176
+
177
+ self.norm_2 = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
178
+ # backward compatibility with weights on the Hub
179
+ self.norm_2.bias = None
180
+
181
+ self.ffn = MptMLP(config)
182
+
183
+ self.dropout_rate = config.attn_config.attn_pdrop
184
+ self.resid_attn_dropout = nn.Dropout(self.dropout_rate)
185
+
186
+ def forward(
187
+ self,
188
+ hidden_states: torch.Tensor,
189
+ position_bias: torch.Tensor,
190
+ attention_mask: torch.Tensor,
191
+ layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
192
+ use_cache: bool = False,
193
+ output_attentions: bool = False,
194
+ ):
195
+ # hidden_states: [batch_size, seq_length, hidden_size]
196
+ # Layer norm at the beginning of the transformer layer.
197
+ layernorm_output = self.norm_1(hidden_states)
198
+
199
+ residual = hidden_states
200
+
201
+ # Self attention.
202
+ attn_outputs, attn_weights, past_key_value = self.attn(
203
+ layernorm_output,
204
+ position_bias=position_bias,
205
+ attention_mask=attention_mask,
206
+ past_key_value=layer_past,
207
+ )
208
+
209
+ hidden_states = self.resid_attn_dropout(attn_outputs) + residual
210
+
211
+ layernorm_output = self.norm_2(hidden_states)
212
+
213
+ # Get residual
214
+ residual = hidden_states
215
+
216
+ # MLP.
217
+ output = self.ffn(layernorm_output, residual)
218
+ outputs = (output,)
219
+
220
+ if use_cache:
221
+ outputs += (past_key_value,)
222
+
223
+ if output_attentions:
224
+ outputs += (attn_weights,)
225
+
226
+ return outputs # hidden_states, present, attentions
227
+
228
+
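The block above follows the usual pre-LayerNorm residual pattern (normalize, apply the sublayer, add the residual back); a compact sketch of just that pattern, not of `MptBlock` itself:

import torch
from torch import nn

class TinyPreLNBlock(nn.Module):
    """Pre-LN residual pattern: x + sublayer(norm(x))."""

    def __init__(self, hidden_size: int):
        super().__init__()
        self.norm = nn.LayerNorm(hidden_size)
        self.sublayer = nn.Linear(hidden_size, hidden_size)  # stand-in for attention/MLP

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x + self.sublayer(self.norm(x))

x = torch.randn(1, 4, 16)
print(TinyPreLNBlock(16)(x).shape)  # torch.Size([1, 4, 16])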
229
+ class MptPreTrainedModel(PreTrainedModel):
230
+ config_class = MptConfig
231
+ base_model_prefix = "transformer"
232
+ supports_gradient_checkpointing = True
233
+ _no_split_modules = ["MptBlock"]
234
+ _keys_to_ignore_on_load_missing = [r"lm_head.*."]
235
+
236
+ def __init__(self, *inputs, **kwargs):
237
+ super().__init__(*inputs, **kwargs)
238
+
239
+ def _init_weights(self, module: nn.Module):
240
+ """Initialize the weights."""
241
+ if isinstance(module, nn.Linear):
242
+ # Slightly different from the TF version which uses truncated_normal for initialization
243
+ # cf https://github.com/pytorch/pytorch/pull/5617
244
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
245
+ if module.bias is not None:
246
+ module.bias.data.zero_()
247
+ elif isinstance(module, nn.Embedding):
248
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
249
+ if module.padding_idx is not None:
250
+ module.weight.data[module.padding_idx].zero_()
251
+ elif isinstance(module, LayerNorm):
252
+ if module.bias is not None:
253
+ module.bias.data.zero_()
254
+ module.weight.data.fill_(1.0)
255
+
256
+ @staticmethod
257
+ def _convert_to_mpt_cache(
258
+ past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]],
259
+ ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]:
260
+ """
261
+ Converts the cache to the format expected by Mpt, i.e. to tuple(tuple([batch_size * num_heads, ...]))
262
+ """
263
+ batch_size, num_heads, head_dim, seq_length = past_key_value[0][0].shape
264
+ batch_size_times_num_heads = batch_size * num_heads
265
+ # key: [batch_size, num_heads, head_dim, seq_length] -> [batch_size * num_heads, head_dim, seq_length]
266
+ # value: [batch_size, num_heads, seq_length, head_dim] -> [batch_size * num_heads, seq_length, head_dim]
267
+ return tuple(
268
+ (
269
+ layer_past[0].reshape(batch_size_times_num_heads, head_dim, seq_length),
270
+ layer_past[1].reshape(batch_size_times_num_heads, seq_length, head_dim),
271
+ )
272
+ for layer_past in past_key_value
273
+ )
274
+
275
+
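A small sketch of the cache reshaping performed by `_convert_to_mpt_cache`, with made-up tensor sizes:

import torch

batch_size, num_heads, head_dim, seq_length = 2, 4, 8, 5
past_key = torch.randn(batch_size, num_heads, head_dim, seq_length)
past_value = torch.randn(batch_size, num_heads, seq_length, head_dim)

# Fold the head dimension into the batch dimension.
key = past_key.reshape(batch_size * num_heads, head_dim, seq_length)
value = past_value.reshape(batch_size * num_heads, seq_length, head_dim)
print(key.shape, value.shape)  # torch.Size([8, 8, 5]) torch.Size([8, 5, 8])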
276
+ MPT_START_DOCSTRING = r"""
277
+
278
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
279
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, etc.)
280
+
281
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
282
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
283
+ and behavior.
284
+
285
+ Parameters:
286
+ config ([`MptConfig`]): Model configuration class with all the parameters of the model.
287
+ Initializing with a config file does not load the weights associated with the model, only the
288
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
289
+ """
290
+
291
+ MPT_INPUTS_DOCSTRING = r"""
292
+ Args:
293
+ input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
294
+ `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0][0].shape[2]`
295
+ (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
296
+
297
+ If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
298
+ `input_ids`.
299
+
300
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
301
+ [`PreTrainedTokenizer.__call__`] for details.
302
+
303
+ [What are input IDs?](../glossary#input-ids)
304
+ past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):
305
+ Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
306
+ `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
307
+ their past given to this model should not be passed as `input_ids` as they have already been computed.
308
+
309
+ Each element of `past_key_values` is a tuple (past_key, past_value):
310
+ - past_key: [batch_size * num_heads, head_dim, kv_length]
311
+ - past_value: [batch_size * num_heads, kv_length, head_dim]
312
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
313
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
314
+
315
+ - 1 for tokens that are **not masked**,
316
+ - 0 for tokens that are **masked**.
317
+
318
+ [What are attention masks?](../glossary#attention-mask)
319
+
320
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
321
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
322
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
323
+ model's internal embedding lookup matrix.
324
+
325
+ If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
326
+ `past_key_values`).
327
+ use_cache (`bool`, *optional*):
328
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
329
+ `past_key_values`).
330
+ output_attentions (`bool`, *optional*):
331
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
332
+ tensors for more detail.
333
+ output_hidden_states (`bool`, *optional*):
334
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
335
+ more detail.
336
+ return_dict (`bool`, *optional*):
337
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
338
+ """
339
+
340
+
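A hedged usage sketch of the inputs documented above; the checkpoint name is an assumption (any MPT checkpoint on the Hub should behave the same):

import torch
from transformers import AutoTokenizer, MptForCausalLM

checkpoint = "mosaicml/mpt-7b"  # assumed checkpoint; substitute your own
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = MptForCausalLM.from_pretrained(checkpoint)

inputs = tokenizer("Hello, my name is", return_tensors="pt")
with torch.no_grad():
    out = model(**inputs, use_cache=True)
print(out.logits.shape)          # (batch_size, sequence_length, vocab_size)
print(len(out.past_key_values))  # config.n_layers (key, value) pairs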
341
+ @add_start_docstrings(
342
+ "The bare Mpt Model transformer outputting raw hidden-states without any specific head on top.",
343
+ MPT_START_DOCSTRING,
344
+ )
345
+ class MptModel(MptPreTrainedModel):
346
+ def __init__(self, config: MptConfig):
347
+ super().__init__(config)
348
+
349
+ self.hidden_size = config.hidden_size
350
+ self.num_heads = config.n_heads
351
+
352
+ # Embedding + LN Embedding
353
+ self.wte = nn.Embedding(config.vocab_size, self.hidden_size)
354
+
355
+ # Transformer blocks
356
+ self.blocks = nn.ModuleList([MptBlock(config) for _ in range(config.n_layers)])
357
+
358
+ # Final Layer Norm
359
+ self.norm_f = LayerNorm(self.hidden_size, eps=config.layer_norm_epsilon)
360
+ # backward compatibility with weights on the Hub
361
+ self.norm_f.bias = None
362
+
363
+ self.gradient_checkpointing = False
364
+
365
+ # Initialize weights and apply final processing
366
+ self.post_init()
367
+
368
+ def get_input_embeddings(self):
369
+ return self.wte
370
+
371
+ def build_mpt_alibi_tensor(self, num_heads, sequence_length, alibi_bias_max=8, device=None):
372
+ return build_mpt_alibi_tensor(num_heads, sequence_length, alibi_bias_max, device)
373
+
374
+ def set_input_embeddings(self, new_embeddings: torch.Tensor):
375
+ self.wte = new_embeddings
376
+
377
+ @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING)
378
+ @add_code_sample_docstrings(
379
+ checkpoint=_CHECKPOINT_FOR_DOC,
380
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
381
+ config_class=_CONFIG_FOR_DOC,
382
+ )
383
+ def forward(
384
+ self,
385
+ input_ids: Optional[torch.LongTensor] = None,
386
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
387
+ attention_mask: Optional[torch.Tensor] = None,
388
+ inputs_embeds: Optional[torch.LongTensor] = None,
389
+ use_cache: Optional[bool] = None,
390
+ output_attentions: Optional[bool] = None,
391
+ output_hidden_states: Optional[bool] = None,
392
+ return_dict: Optional[bool] = None,
393
+ ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
394
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
395
+ output_hidden_states = (
396
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
397
+ )
398
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
399
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
400
+
401
+ if input_ids is not None and inputs_embeds is not None:
402
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
403
+ elif input_ids is not None:
404
+ batch_size, seq_length = input_ids.shape
405
+ elif inputs_embeds is not None:
406
+ batch_size, seq_length, _ = inputs_embeds.shape
407
+ else:
408
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
409
+
410
+ if past_key_values is None:
411
+ past_key_values = tuple([None] * len(self.blocks))
412
+
413
+ if inputs_embeds is None:
414
+ inputs_embeds = self.wte(input_ids)
415
+
416
+ hidden_states = inputs_embeds
417
+
418
+ presents = () if use_cache else None
419
+ all_self_attentions = () if output_attentions else None
420
+ all_hidden_states = () if output_hidden_states else None
421
+
422
+ if self.gradient_checkpointing and self.training:
423
+ if use_cache:
424
+ logger.warning_once(
425
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
426
+ )
427
+ use_cache = False
428
+
429
+ # Compute alibi tensor: check build_alibi_tensor documentation
430
+ seq_length_with_past = seq_length
431
+ past_key_values_length = 0
432
+ if past_key_values[0] is not None:
433
+ past_key_values_length = past_key_values[0][0].shape[2]
434
+ seq_length_with_past = seq_length_with_past + past_key_values_length
435
+ if attention_mask is None:
436
+ attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)
437
+ else:
438
+ attention_mask = attention_mask.to(hidden_states.device)
439
+
440
+ alibi = self.build_mpt_alibi_tensor(self.num_heads, self.config.max_seq_len, device=hidden_states.device)
441
+
442
+ causal_mask = _prepare_4d_causal_attention_mask(
443
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
444
+ )
445
+ causal_mask = causal_mask.bool()
446
+
447
+ for block, layer_past in zip(self.blocks, past_key_values):
448
+ if output_hidden_states:
449
+ all_hidden_states = all_hidden_states + (hidden_states,)
450
+
451
+ if self.gradient_checkpointing and self.training:
452
+ outputs = self._gradient_checkpointing_func(
453
+ block.__call__,
454
+ hidden_states,
455
+ alibi,
456
+ causal_mask,
457
+ layer_past,
458
+ use_cache,
459
+ output_attentions,
460
+ )
461
+ else:
462
+ outputs = block(
463
+ hidden_states,
464
+ layer_past=layer_past,
465
+ attention_mask=causal_mask,
466
+ use_cache=use_cache,
467
+ output_attentions=output_attentions,
468
+ position_bias=alibi,
469
+ )
470
+
471
+ hidden_states = outputs[0]
472
+ if use_cache is True:
473
+ presents = presents + (outputs[1],)
474
+
475
+ if output_attentions:
476
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
477
+
478
+ # Add last hidden state
479
+ hidden_states = self.norm_f(hidden_states)
480
+
481
+ if output_hidden_states:
482
+ all_hidden_states = all_hidden_states + (hidden_states,)
483
+
484
+ if not return_dict:
485
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
486
+
487
+ return BaseModelOutputWithPastAndCrossAttentions(
488
+ last_hidden_state=hidden_states,
489
+ past_key_values=presents,
490
+ hidden_states=all_hidden_states,
491
+ attentions=all_self_attentions,
492
+ )
493
+
494
+
495
+ @add_start_docstrings(
496
+ """
497
+ The MPT Model transformer with a language modeling head on top (linear layer with weights tied to the input
498
+ embeddings).
499
+ """,
500
+ MPT_START_DOCSTRING,
501
+ )
502
+ class MptForCausalLM(MptPreTrainedModel):
503
+ _tied_weights_keys = ["lm_head.weight"]
504
+
505
+ def __init__(self, config: MptConfig):
506
+ super().__init__(config)
507
+ self.transformer = MptModel(config)
508
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
509
+
510
+ # Initialize weights and apply final processing
511
+ self.post_init()
512
+
513
+ def get_output_embeddings(self):
514
+ return self.lm_head
515
+
516
+ def set_output_embeddings(self, new_embeddings: torch.Tensor):
517
+ self.lm_head = new_embeddings
518
+
519
+ def prepare_inputs_for_generation(
520
+ self,
521
+ input_ids: torch.LongTensor,
522
+ past_key_values: Optional[torch.Tensor] = None,
523
+ attention_mask: Optional[torch.Tensor] = None,
524
+ inputs_embeds: Optional[torch.Tensor] = None,
525
+ use_cache: Optional[bool] = None,
526
+ **kwargs,
527
+ ) -> dict:
528
+ # only last tokens for input_ids if past is not None
529
+ if past_key_values is not None:
530
+ past_length = past_key_values[0][0].shape[2]
531
+
532
+ # Some generation methods already pass only the last input ID
533
+ if input_ids.shape[1] > past_length:
534
+ remove_prefix_length = past_length
535
+ else:
536
+ # Default to old behavior: keep only final ID
537
+ remove_prefix_length = input_ids.shape[1] - 1
538
+
539
+ input_ids = input_ids[:, remove_prefix_length:]
540
+
541
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
542
+ if inputs_embeds is not None and past_key_values is None:
543
+ model_inputs = {"inputs_embeds": inputs_embeds}
544
+ else:
545
+ model_inputs = {"input_ids": input_ids}
546
+
547
+ model_inputs.update(
548
+ {
549
+ "past_key_values": past_key_values, # NITS should it be layer_past?
550
+ "use_cache": use_cache,
551
+ "attention_mask": attention_mask,
552
+ }
553
+ )
554
+ return model_inputs
555
+
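The truncation logic above keeps only the tokens whose past has not been cached yet; a toy sketch:

import torch

input_ids = torch.tensor([[11, 12, 13, 14]])  # prompt plus generated tokens so far
past_length = 3                                # tokens already in the cache

if input_ids.shape[1] > past_length:
    remove_prefix_length = past_length
else:
    remove_prefix_length = input_ids.shape[1] - 1  # keep only the final token
print(input_ids[:, remove_prefix_length:])  # tensor([[14]])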
556
+ @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING)
557
+ @add_code_sample_docstrings(
558
+ checkpoint=_CHECKPOINT_FOR_DOC,
559
+ output_type=CausalLMOutputWithCrossAttentions,
560
+ config_class=_CONFIG_FOR_DOC,
561
+ )
562
+ def forward(
563
+ self,
564
+ input_ids: Optional[torch.LongTensor] = None,
565
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
566
+ attention_mask: Optional[torch.Tensor] = None,
567
+ inputs_embeds: Optional[torch.Tensor] = None,
568
+ labels: Optional[torch.Tensor] = None,
569
+ use_cache: Optional[bool] = None,
570
+ output_attentions: Optional[bool] = None,
571
+ output_hidden_states: Optional[bool] = None,
572
+ return_dict: Optional[bool] = None,
573
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
574
+ r"""
575
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
576
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
577
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
578
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
579
+ """
580
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
581
+
582
+ transformer_outputs = self.transformer(
583
+ input_ids,
584
+ past_key_values=past_key_values,
585
+ attention_mask=attention_mask,
586
+ inputs_embeds=inputs_embeds,
587
+ use_cache=use_cache,
588
+ output_attentions=output_attentions,
589
+ output_hidden_states=output_hidden_states,
590
+ return_dict=return_dict,
591
+ )
592
+ hidden_states = transformer_outputs[0]
593
+
594
+ lm_logits = self.lm_head(hidden_states)
595
+
596
+ loss = None
597
+ if labels is not None:
598
+ # move labels to correct device to enable model parallelism
599
+ labels = labels.to(lm_logits.device)
600
+ # Shift so that tokens < n predict n
601
+ shift_logits = lm_logits[..., :-1, :].contiguous()
602
+ shift_labels = labels[..., 1:].contiguous()
603
+ batch_size, seq_length, vocab_size = shift_logits.shape
604
+ # Flatten the tokens
605
+ loss_fct = CrossEntropyLoss()
606
+ loss = loss_fct(
607
+ shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length)
608
+ )
609
+
610
+ if not return_dict:
611
+ output = (lm_logits,) + transformer_outputs[1:]
612
+ return ((loss,) + output) if loss is not None else output
613
+
614
+ return CausalLMOutputWithCrossAttentions(
615
+ loss=loss,
616
+ logits=lm_logits,
617
+ past_key_values=transformer_outputs.past_key_values,
618
+ hidden_states=transformer_outputs.hidden_states,
619
+ attentions=transformer_outputs.attentions,
620
+ )
621
+
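A minimal sketch of the label shifting used for the causal LM loss above (random tensors, no real model):

import torch
from torch.nn import CrossEntropyLoss

vocab_size = 10
lm_logits = torch.randn(1, 5, vocab_size)
labels = torch.randint(0, vocab_size, (1, 5))

# Tokens < n predict token n: drop the last logit and the first label.
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss = CrossEntropyLoss()(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(loss.item())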
622
+ def _reorder_cache(
623
+ self, past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
624
+ ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
625
+ """
626
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
627
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
628
+ beam_idx at every generation step.
629
+
630
+ Output shares the same memory storage as `past`.
631
+ """
632
+ # Get a copy of `beam_idx` on all the devices where we need those indices.
633
+ device_to_beam_idx = {
634
+ past_state.device: beam_idx.to(past_state.device) for layer_past in past for past_state in layer_past
635
+ }
636
+ reordered_past = tuple(
637
+ (
638
+ layer_past[0].index_select(0, device_to_beam_idx[layer_past[0].device]),
639
+ layer_past[1].index_select(0, device_to_beam_idx[layer_past[0].device]),
640
+ )
641
+ for layer_past in past
642
+ )
643
+ return reordered_past
644
+
645
+
646
+ @add_start_docstrings(
647
+ """
648
+ The MPT Model transformer with a sequence classification head on top (linear layer).
649
+
650
+ [`MptForSequenceClassification`] uses the last token in order to do the classification, as other causal models
651
+ (e.g. GPT-1) do.
652
+
653
+ Since it does classification on the last token, it needs to know the position of the last token. If a
654
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
655
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
656
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
657
+ each row of the batch).
658
+ """,
659
+ MPT_START_DOCSTRING,
660
+ )
661
+ class MptForSequenceClassification(MptPreTrainedModel):
662
+ def __init__(self, config: MptConfig):
663
+ super().__init__(config)
664
+ self.num_labels = config.num_labels
665
+ self.transformer = MptModel(config)
666
+ self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False)
667
+
668
+ # Initialize weights and apply final processing
669
+ self.post_init()
670
+
671
+ @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING)
672
+ @add_code_sample_docstrings(
673
+ checkpoint=_CHECKPOINT_FOR_DOC,
674
+ output_type=SequenceClassifierOutputWithPast,
675
+ config_class=_CONFIG_FOR_DOC,
676
+ )
677
+ def forward(
678
+ self,
679
+ input_ids: Optional[torch.LongTensor] = None,
680
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
681
+ attention_mask: Optional[torch.Tensor] = None,
682
+ inputs_embeds: Optional[torch.Tensor] = None,
683
+ labels: Optional[torch.Tensor] = None,
684
+ use_cache: Optional[bool] = None,
685
+ output_attentions: Optional[bool] = None,
686
+ output_hidden_states: Optional[bool] = None,
687
+ return_dict: Optional[bool] = None,
688
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]:
689
+ r"""
690
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
691
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
692
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
693
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
694
+ """
695
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
696
+
697
+ transformer_outputs = self.transformer(
698
+ input_ids,
699
+ past_key_values=past_key_values,
700
+ attention_mask=attention_mask,
701
+ inputs_embeds=inputs_embeds,
702
+ use_cache=use_cache,
703
+ output_attentions=output_attentions,
704
+ output_hidden_states=output_hidden_states,
705
+ return_dict=return_dict,
706
+ )
707
+
708
+ hidden_states = transformer_outputs[0]
709
+ logits = self.score(hidden_states)
710
+
711
+ if input_ids is not None:
712
+ batch_size = input_ids.shape[0]
713
+ else:
714
+ batch_size = inputs_embeds.shape[0]
715
+
716
+ if self.config.pad_token_id is None and batch_size != 1:
717
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
718
+ if self.config.pad_token_id is None:
719
+ sequence_lengths = -1
720
+ else:
721
+ if input_ids is not None:
722
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
723
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
724
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
725
+ sequence_lengths = sequence_lengths.to(logits.device)
726
+ else:
727
+ sequence_lengths = -1
728
+ logger.warning(
729
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
730
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
731
+ )
732
+
733
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
734
+
735
+ loss = None
736
+ if labels is not None:
737
+ if self.config.problem_type is None:
738
+ if self.num_labels == 1:
739
+ self.config.problem_type = "regression"
740
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
741
+ self.config.problem_type = "single_label_classification"
742
+ else:
743
+ self.config.problem_type = "multi_label_classification"
744
+
745
+ if self.config.problem_type == "regression":
746
+ loss_fct = MSELoss()
747
+ if self.num_labels == 1:
748
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
749
+ else:
750
+ loss = loss_fct(pooled_logits, labels)
751
+ elif self.config.problem_type == "single_label_classification":
752
+ loss_fct = CrossEntropyLoss()
753
+ loss = loss_fct(pooled_logits, labels)
754
+ elif self.config.problem_type == "multi_label_classification":
755
+ loss_fct = BCEWithLogitsLoss()
756
+ loss = loss_fct(pooled_logits, labels)
757
+ if not return_dict:
758
+ output = (pooled_logits,) + transformer_outputs[1:]
759
+ return ((loss,) + output) if loss is not None else output
760
+
761
+ return SequenceClassifierOutputWithPast(
762
+ loss=loss,
763
+ logits=pooled_logits,
764
+ past_key_values=transformer_outputs.past_key_values,
765
+ hidden_states=transformer_outputs.hidden_states,
766
+ attentions=transformer_outputs.attentions,
767
+ )
768
+
769
+
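A small sketch of how the last non-padding position is located for pooling, as described in the class docstring above (toy ids; pad_token_id of 0 is an assumption):

import torch

pad_token_id = 0  # assumed for this sketch
input_ids = torch.tensor([[5, 6, 7, 0, 0],
                          [8, 9, 0, 0, 0]])

# First pad position minus one == last real token; the modulo keeps it ONNX-friendly
# and falls back to the final position when a row contains no padding at all.
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
print(sequence_lengths)  # tensor([2, 1])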
770
+ @add_start_docstrings(
771
+ """
772
+ MPT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
773
+ Named-Entity-Recognition (NER) tasks.
774
+ """,
775
+ MPT_START_DOCSTRING,
776
+ )
777
+ class MptForTokenClassification(MptPreTrainedModel):
778
+ def __init__(self, config: MptConfig):
779
+ super().__init__(config)
780
+ self.num_labels = config.num_labels
781
+
782
+ self.transformer = MptModel(config)
783
+ if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
784
+ classifier_dropout = config.classifier_dropout
785
+ elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
786
+ classifier_dropout = config.hidden_dropout
787
+ else:
788
+ classifier_dropout = 0.1
789
+ self.dropout = nn.Dropout(classifier_dropout)
790
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
791
+
792
+ # Initialize weights and apply final processing
793
+ self.post_init()
794
+
795
+ @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING)
796
+ @add_code_sample_docstrings(
797
+ checkpoint=_CHECKPOINT_FOR_DOC,
798
+ output_type=TokenClassifierOutput,
799
+ config_class=_CONFIG_FOR_DOC,
800
+ )
801
+ def forward(
802
+ self,
803
+ input_ids: Optional[torch.LongTensor] = None,
804
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
805
+ attention_mask: Optional[torch.Tensor] = None,
806
+ inputs_embeds: Optional[torch.Tensor] = None,
807
+ labels: Optional[torch.Tensor] = None,
808
+ use_cache: Optional[bool] = None,
809
+ output_attentions: Optional[bool] = None,
810
+ output_hidden_states: Optional[bool] = None,
811
+ return_dict: Optional[bool] = None,
812
+ **deprecated_arguments,
813
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
814
+ r"""
815
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
816
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
819
+ """
820
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
821
+
822
+ transformer_outputs = self.transformer(
823
+ input_ids,
824
+ past_key_values=past_key_values,
825
+ attention_mask=attention_mask,
826
+ inputs_embeds=inputs_embeds,
827
+ use_cache=use_cache,
828
+ output_attentions=output_attentions,
829
+ output_hidden_states=output_hidden_states,
830
+ return_dict=return_dict,
831
+ )
832
+
833
+ hidden_states = transformer_outputs[0]
834
+ hidden_states = self.dropout(hidden_states)
835
+ logits = self.classifier(hidden_states)
836
+
837
+ loss = None
838
+ if labels is not None:
839
+ # move labels to correct device to enable model parallelism
840
+ labels = labels.to(logits.device)
841
+ batch_size, seq_length = labels.shape
842
+ loss_fct = CrossEntropyLoss()
843
+ loss = loss_fct(
844
+ logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
845
+ )
846
+
847
+ if not return_dict:
848
+ output = (logits,) + transformer_outputs[2:]
849
+ return ((loss,) + output) if loss is not None else output
850
+
851
+ return TokenClassifierOutput(
852
+ loss=loss,
853
+ logits=logits,
854
+ hidden_states=transformer_outputs.hidden_states,
855
+ attentions=transformer_outputs.attentions,
856
+ )
857
+
858
+
859
+ @add_start_docstrings(
860
+ """
861
+ The MPT Model transformer with a span classification head on top for extractive question-answering tasks like SQuAD
862
+ (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
863
+ """,
864
+ MPT_START_DOCSTRING,
865
+ )
866
+ class MptForQuestionAnswering(MptPreTrainedModel):
867
+ def __init__(self, config):
868
+ super().__init__(config)
869
+ self.transformer = MptModel(config)
870
+ self.qa_outputs = nn.Linear(config.hidden_size, 2)
871
+
872
+ # Initialize weights and apply final processing
873
+ self.post_init()
874
+
875
+ @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
876
+ def forward(
877
+ self,
878
+ input_ids: Optional[torch.LongTensor] = None,
879
+ attention_mask: Optional[torch.FloatTensor] = None,
880
+ inputs_embeds: Optional[torch.FloatTensor] = None,
881
+ start_positions: Optional[torch.LongTensor] = None,
882
+ end_positions: Optional[torch.LongTensor] = None,
883
+ output_attentions: Optional[bool] = None,
884
+ output_hidden_states: Optional[bool] = None,
885
+ return_dict: Optional[bool] = None,
886
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
887
+ r"""
888
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
889
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
890
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
891
+ are not taken into account for computing the loss.
892
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
893
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
894
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
895
+ are not taken into account for computing the loss.
896
+ """
897
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
898
+
899
+ outputs = self.transformer(
900
+ input_ids,
901
+ attention_mask=attention_mask,
902
+ inputs_embeds=inputs_embeds,
903
+ output_attentions=output_attentions,
904
+ output_hidden_states=output_hidden_states,
905
+ return_dict=return_dict,
906
+ )
907
+
908
+ sequence_output = outputs[0]
909
+
910
+ logits = self.qa_outputs(sequence_output)
911
+ start_logits, end_logits = logits.split(1, dim=-1)
912
+ start_logits = start_logits.squeeze(-1).contiguous()
913
+ end_logits = end_logits.squeeze(-1).contiguous()
914
+
915
+ total_loss = None
916
+ if start_positions is not None and end_positions is not None:
917
+ # If we are on multi-GPU, the split adds a dimension; squeeze it away
918
+ if len(start_positions.size()) > 1:
919
+ start_positions = start_positions.squeeze(-1)
920
+ if len(end_positions.size()) > 1:
921
+ end_positions = end_positions.squeeze(-1)
922
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
923
+ ignored_index = start_logits.size(1)
924
+ start_positions = start_positions.clamp(0, ignored_index)
925
+ end_positions = end_positions.clamp(0, ignored_index)
926
+
927
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
928
+ start_loss = loss_fct(start_logits, start_positions)
929
+ end_loss = loss_fct(end_logits, end_positions)
930
+ total_loss = (start_loss + end_loss) / 2
931
+
932
+ if not return_dict:
933
+ output = (start_logits, end_logits) + outputs[2:]
934
+ return ((total_loss,) + output) if total_loss is not None else output
935
+
936
+ return QuestionAnsweringModelOutput(
937
+ loss=total_loss,
938
+ start_logits=start_logits,
939
+ end_logits=end_logits,
940
+ hidden_states=outputs.hidden_states,
941
+ attentions=outputs.attentions,
942
+ )
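A toy sketch of how the two-unit `qa_outputs` head is split into start and end logits, mirroring the forward pass above:

import torch
from torch import nn

hidden_size, seq_len = 16, 6
sequence_output = torch.randn(1, seq_len, hidden_size)
qa_outputs = nn.Linear(hidden_size, 2)

logits = qa_outputs(sequence_output)                # (1, seq_len, 2)
start_logits, end_logits = logits.split(1, dim=-1)  # two (1, seq_len, 1) tensors
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
print(start_logits.shape, end_logits.shape)         # torch.Size([1, 6]) twice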
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/__init__.py ADDED
@@ -0,0 +1,67 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_musicgen": [
21
+ "MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "MusicgenConfig",
23
+ "MusicgenDecoderConfig",
24
+ ],
25
+ "processing_musicgen": ["MusicgenProcessor"],
26
+ }
27
+
28
+ try:
29
+ if not is_torch_available():
30
+ raise OptionalDependencyNotAvailable()
31
+ except OptionalDependencyNotAvailable:
32
+ pass
33
+ else:
34
+ _import_structure["modeling_musicgen"] = [
35
+ "MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST",
36
+ "MusicgenForConditionalGeneration",
37
+ "MusicgenForCausalLM",
38
+ "MusicgenModel",
39
+ "MusicgenPreTrainedModel",
40
+ ]
41
+
42
+ if TYPE_CHECKING:
43
+ from .configuration_musicgen import (
44
+ MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP,
45
+ MusicgenConfig,
46
+ MusicgenDecoderConfig,
47
+ )
48
+ from .processing_musicgen import MusicgenProcessor
49
+
50
+ try:
51
+ if not is_torch_available():
52
+ raise OptionalDependencyNotAvailable()
53
+ except OptionalDependencyNotAvailable:
54
+ pass
55
+ else:
56
+ from .modeling_musicgen import (
57
+ MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST,
58
+ MusicgenForCausalLM,
59
+ MusicgenForConditionalGeneration,
60
+ MusicgenModel,
61
+ MusicgenPreTrainedModel,
62
+ )
63
+
64
+ else:
65
+ import sys
66
+
67
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/convert_musicgen_transformers.py ADDED
@@ -0,0 +1,235 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert MusicGen checkpoints from the original repository."""
16
+ import argparse
17
+ from pathlib import Path
18
+ from typing import Dict, OrderedDict, Tuple
19
+
20
+ import torch
21
+ from audiocraft.models import MusicGen
22
+
23
+ from transformers import (
24
+ AutoFeatureExtractor,
25
+ AutoTokenizer,
26
+ EncodecModel,
27
+ MusicgenDecoderConfig,
28
+ MusicgenForConditionalGeneration,
29
+ MusicgenProcessor,
30
+ T5EncoderModel,
31
+ )
32
+ from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
33
+ from transformers.utils import logging
34
+
35
+
36
+ logging.set_verbosity_info()
37
+ logger = logging.get_logger(__name__)
38
+
39
+
40
+ EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
41
+
42
+
43
+ def rename_keys(name):
44
+ if "emb" in name:
45
+ name = name.replace("emb", "model.decoder.embed_tokens")
46
+ if "transformer" in name:
47
+ name = name.replace("transformer", "model.decoder")
48
+ if "cross_attention" in name:
49
+ name = name.replace("cross_attention", "encoder_attn")
50
+ if "linear1" in name:
51
+ name = name.replace("linear1", "fc1")
52
+ if "linear2" in name:
53
+ name = name.replace("linear2", "fc2")
54
+ if "norm1" in name:
55
+ name = name.replace("norm1", "self_attn_layer_norm")
56
+ if "norm_cross" in name:
57
+ name = name.replace("norm_cross", "encoder_attn_layer_norm")
58
+ if "norm2" in name:
59
+ name = name.replace("norm2", "final_layer_norm")
60
+ if "out_norm" in name:
61
+ name = name.replace("out_norm", "model.decoder.layer_norm")
62
+ if "linears" in name:
63
+ name = name.replace("linears", "lm_heads")
64
+ if "condition_provider.conditioners.description.output_proj" in name:
65
+ name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
66
+ return name
67
+
68
+
69
+ def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
70
+ """Function that takes the fairseq Musicgen state dict and renames it according to the HF
71
+ module names. It further partitions the state dict into the decoder (LM) state dict, and that for the
72
+ encoder-decoder projection."""
73
+ keys = list(state_dict.keys())
74
+ enc_dec_proj_state_dict = {}
75
+ for key in keys:
76
+ val = state_dict.pop(key)
77
+ key = rename_keys(key)
78
+ if "in_proj_weight" in key:
79
+ # split fused qkv proj
80
+ state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
81
+ state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
82
+ state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
83
+ elif "enc_to_dec_proj" in key:
84
+ enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
85
+ else:
86
+ state_dict[key] = val
87
+ return state_dict, enc_dec_proj_state_dict
88
+
89
+
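A toy sketch of the fused `in_proj_weight` split performed above (made-up sizes; the real weights come from the audiocraft checkpoint):

import torch

hidden_size = 4
# Fused QKV projection: rows are [q; k; v], each hidden_size tall.
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)

q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_w = in_proj_weight[-hidden_size:, :]
print(q_w.shape, k_w.shape, v_w.shape)  # three (4, 4) matrices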
90
+ def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
91
+ if checkpoint == "small" or checkpoint == "facebook/musicgen-stereo-small":
92
+ # default config values
93
+ hidden_size = 1024
94
+ num_hidden_layers = 24
95
+ num_attention_heads = 16
96
+ elif checkpoint == "medium" or checkpoint == "facebook/musicgen-stereo-medium":
97
+ hidden_size = 1536
98
+ num_hidden_layers = 48
99
+ num_attention_heads = 24
100
+ elif checkpoint == "large" or checkpoint == "facebook/musicgen-stereo-large":
101
+ hidden_size = 2048
102
+ num_hidden_layers = 48
103
+ num_attention_heads = 32
104
+ else:
105
+ raise ValueError(
106
+ "Checkpoint should be one of `['small', 'medium', 'large']` for the mono checkpoints, "
107
+ "or `['facebook/musicgen-stereo-small', 'facebook/musicgen-stereo-medium', 'facebook/musicgen-stereo-large']` "
108
+ f"for the stereo checkpoints, got {checkpoint}."
109
+ )
110
+
111
+ if "stereo" in checkpoint:
112
+ audio_channels = 2
113
+ num_codebooks = 8
114
+ else:
115
+ audio_channels = 1
116
+ num_codebooks = 4
117
+
118
+ config = MusicgenDecoderConfig(
119
+ hidden_size=hidden_size,
120
+ ffn_dim=hidden_size * 4,
121
+ num_hidden_layers=num_hidden_layers,
122
+ num_attention_heads=num_attention_heads,
123
+ num_codebooks=num_codebooks,
124
+ audio_channels=audio_channels,
125
+ )
126
+ return config
127
+
128
+
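For reference, the config the function above builds for the mono `small` checkpoint can be reproduced directly (values copied from the branch above):

from transformers import MusicgenDecoderConfig

config = MusicgenDecoderConfig(
    hidden_size=1024,
    ffn_dim=4 * 1024,
    num_hidden_layers=24,
    num_attention_heads=16,
    num_codebooks=4,
    audio_channels=1,
)
print(config.hidden_size, config.num_codebooks)  # 1024 4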
129
+ @torch.no_grad()
130
+ def convert_musicgen_checkpoint(
131
+ checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu", safe_serialization=False
132
+ ):
133
+ fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
134
+ decoder_config = decoder_config_from_checkpoint(checkpoint)
135
+
136
+ decoder_state_dict = fairseq_model.lm.state_dict()
137
+ decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
138
+ decoder_state_dict, hidden_size=decoder_config.hidden_size
139
+ )
140
+
141
+ text_encoder = T5EncoderModel.from_pretrained("google-t5/t5-base")
142
+ audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
143
+ decoder = MusicgenForCausalLM(decoder_config).eval()
144
+
145
+ # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
146
+ missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
147
+
148
+ for key in missing_keys.copy():
149
+ if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
150
+ missing_keys.remove(key)
151
+
152
+ if len(missing_keys) > 0:
153
+ raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
154
+
155
+ if len(unexpected_keys) > 0:
156
+ raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
157
+
158
+ # init the composite model
159
+ model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
160
+
161
+ # load the pre-trained enc-dec projection (from the decoder state dict)
162
+ model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
163
+
164
+ # check we can do a forward pass
165
+ input_ids = torch.arange(0, 2 * decoder_config.num_codebooks, dtype=torch.long).reshape(2, -1)
166
+ decoder_input_ids = input_ids.reshape(2 * decoder_config.num_codebooks, -1)
167
+
168
+ with torch.no_grad():
169
+ logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
170
+
171
+ if logits.shape != (2 * decoder_config.num_codebooks, 1, 2048):
172
+ raise ValueError("Incorrect shape for logits")
173
+
174
+ # now construct the processor
175
+ tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
176
+ feature_extractor = AutoFeatureExtractor.from_pretrained(
177
+ "facebook/encodec_32khz", padding_side="left", feature_size=decoder_config.audio_channels
178
+ )
179
+
180
+ processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
181
+
182
+ # set the appropriate bos/pad token ids
183
+ model.generation_config.decoder_start_token_id = 2048
184
+ model.generation_config.pad_token_id = 2048
185
+
186
+ # set other default generation config params
187
+ model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
188
+ model.generation_config.do_sample = True
189
+ model.generation_config.guidance_scale = 3.0
190
+
191
+ if pytorch_dump_folder is not None:
192
+ Path(pytorch_dump_folder).mkdir(exist_ok=True)
193
+ logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
194
+ model.save_pretrained(pytorch_dump_folder, safe_serialization=safe_serialization)
195
+ processor.save_pretrained(pytorch_dump_folder)
196
+
197
+ if repo_id:
198
+ logger.info(f"Pushing model {checkpoint} to {repo_id}")
199
+ model.push_to_hub(repo_id, safe_serialization=safe_serialization)
200
+ processor.push_to_hub(repo_id)
201
+
202
+
203
+ if __name__ == "__main__":
204
+ parser = argparse.ArgumentParser()
205
+ # Required parameters
206
+ parser.add_argument(
207
+ "--checkpoint",
208
+ default="small",
209
+ type=str,
210
+ help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: "
211
+ "`['small', 'medium', 'large']` for the mono checkpoints, or "
212
+ "`['facebook/musicgen-stereo-small', 'facebook/musicgen-stereo-medium', 'facebook/musicgen-stereo-large']` "
213
+ "for the stereo checkpoints.",
214
+ )
215
+ parser.add_argument(
216
+ "--pytorch_dump_folder",
217
+ required=True,
218
+ default=None,
219
+ type=str,
220
+ help="Path to the output PyTorch model directory.",
221
+ )
222
+ parser.add_argument(
223
+ "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
224
+ )
225
+ parser.add_argument(
226
+ "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
227
+ )
228
+ parser.add_argument(
229
+ "--safe_serialization",
230
+ action="store_true",
231
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).",
232
+ )
233
+
234
+ args = parser.parse_args()
235
+ convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device, args.safe_serialization)
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen/processing_musicgen.py ADDED
@@ -0,0 +1,140 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Text/audio processor class for MusicGen
17
+ """
18
+ from typing import List, Optional
19
+
20
+ import numpy as np
21
+
22
+ from ...processing_utils import ProcessorMixin
23
+ from ...utils import to_numpy
24
+
25
+
26
+ class MusicgenProcessor(ProcessorMixin):
27
+ r"""
28
+ Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single processor
29
+ class.
30
+
31
+ [`MusicgenProcessor`] offers all the functionalities of [`EncodecFeatureExtractor`] and [`T5Tokenizer`]. See
32
+ [`~MusicgenProcessor.__call__`] and [`~MusicgenProcessor.decode`] for more information.
33
+
34
+ Args:
35
+ feature_extractor (`EncodecFeatureExtractor`):
36
+ An instance of [`EncodecFeatureExtractor`]. The feature extractor is a required input.
37
+ tokenizer (`T5Tokenizer`):
38
+ An instance of [`T5Tokenizer`]. The tokenizer is a required input.
39
+ """
40
+
41
+ feature_extractor_class = "EncodecFeatureExtractor"
42
+ tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
43
+
44
+ def __init__(self, feature_extractor, tokenizer):
45
+ super().__init__(feature_extractor, tokenizer)
46
+ self.current_processor = self.feature_extractor
47
+ self._in_target_context_manager = False
48
+
49
+ def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
50
+ return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
51
+
52
+ def __call__(self, *args, **kwargs):
53
+ """
54
+ Forwards the `audio` argument to EncodecFeatureExtractor's [`~EncodecFeatureExtractor.__call__`] and the `text`
55
+ argument to [`~T5Tokenizer.__call__`]. Please refer to the docstring of the above two methods for more
56
+ information.
57
+ """
58
+ # For backward compatibility
59
+ if self._in_target_context_manager:
60
+ return self.current_processor(*args, **kwargs)
61
+
62
+ audio = kwargs.pop("audio", None)
63
+ sampling_rate = kwargs.pop("sampling_rate", None)
64
+ text = kwargs.pop("text", None)
65
+ if len(args) > 0:
66
+ audio = args[0]
67
+ args = args[1:]
68
+
69
+ if audio is None and text is None:
70
+ raise ValueError("You need to specify either an `audio` or `text` input to process.")
71
+
72
+ if text is not None:
73
+ inputs = self.tokenizer(text, **kwargs)
74
+
75
+ if audio is not None:
76
+ audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
77
+
78
+ if audio is None:
79
+ return inputs
80
+
81
+ elif text is None:
82
+ return audio_inputs
83
+
84
+ else:
85
+ inputs["input_values"] = audio_inputs["input_values"]
86
+ if "padding_mask" in audio_inputs:
87
+ inputs["padding_mask"] = audio_inputs["padding_mask"]
88
+ return inputs
89
+
90
+ def batch_decode(self, *args, **kwargs):
91
+ """
92
+ This method is used to decode either batches of audio outputs from the MusicGen model, or batches of token ids
93
+ from the tokenizer. In the case of decoding token ids, this method forwards all its arguments to T5Tokenizer's
94
+ [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information.
95
+ """
96
+ audio_values = kwargs.pop("audio", None)
97
+ padding_mask = kwargs.pop("padding_mask", None)
98
+
99
+ if len(args) > 0:
100
+ audio_values = args[0]
101
+ args = args[1:]
102
+
103
+ if audio_values is not None:
104
+ return self._decode_audio(audio_values, padding_mask=padding_mask)
105
+ else:
106
+ return self.tokenizer.batch_decode(*args, **kwargs)
107
+
108
+ def decode(self, *args, **kwargs):
109
+ """
110
+ This method forwards all its arguments to T5Tokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the
111
+ docstring of this method for more information.
112
+ """
113
+ return self.tokenizer.decode(*args, **kwargs)
114
+
115
+ def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
116
+ """
117
+ This method strips any padding from the audio values to return a list of numpy audio arrays.
118
+ """
119
+ audio_values = to_numpy(audio_values)
120
+ bsz, channels, seq_len = audio_values.shape
121
+
122
+ if padding_mask is None:
123
+ return list(audio_values)
124
+
125
+ padding_mask = to_numpy(padding_mask)
126
+
127
+ # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
128
+ # token (so that the generated audio values are **not** treated as padded tokens)
129
+ difference = seq_len - padding_mask.shape[-1]
130
+ padding_value = 1 - self.feature_extractor.padding_value
131
+ padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)
132
+
133
+ audio_values = audio_values.tolist()
134
+ for i in range(bsz):
135
+ sliced_audio = np.asarray(audio_values[i])[
136
+ padding_mask[i][None, :] != self.feature_extractor.padding_value
137
+ ]
138
+ audio_values[i] = sliced_audio.reshape(channels, -1)
139
+
140
+ return audio_values
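A numpy-only sketch of the padding-stripping logic in `_decode_audio`, assuming a padding value of 0 (the actual `feature_extractor.padding_value` is an assumption here):

import numpy as np

padding_value = 0  # assumed feature_extractor.padding_value
audio_values = np.arange(12, dtype=np.float32).reshape(1, 1, 12)  # (bsz, channels, seq_len)
padding_mask = np.array([[1] * 9 + [0] * 3])                      # last 3 frames are padding

# Keep the frames whose mask differs from the padding value, then restore (channels, -1).
kept = np.asarray(audio_values[0])[padding_mask[0][None, :] != padding_value]
print(kept.reshape(1, -1).shape)  # (1, 9)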
llmeval-env/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__init__.py ADDED
@@ -0,0 +1,59 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_torch_available,
20
+ )
21
+
22
+
23
+ _import_structure = {
24
+ "configuration_recurrent_gemma": ["RecurrentGemmaConfig"],
25
+ }
26
+
27
+
28
+ try:
29
+ if not is_torch_available():
30
+ raise OptionalDependencyNotAvailable()
31
+ except OptionalDependencyNotAvailable:
32
+ pass
33
+ else:
34
+ _import_structure["modeling_recurrent_gemma"] = [
35
+ "RecurrentGemmaForCausalLM",
36
+ "RecurrentGemmaModel",
37
+ "RecurrentGemmaPreTrainedModel",
38
+ ]
39
+
40
+
41
+ if TYPE_CHECKING:
42
+ from .configuration_recurrent_gemma import RecurrentGemmaConfig
43
+
44
+ try:
45
+ if not is_torch_available():
46
+ raise OptionalDependencyNotAvailable()
47
+ except OptionalDependencyNotAvailable:
48
+ pass
49
+ else:
50
+ from .modeling_recurrent_gemma import (
51
+ RecurrentGemmaForCausalLM,
52
+ RecurrentGemmaModel,
53
+ RecurrentGemmaPreTrainedModel,
54
+ )
55
+
56
+ else:
57
+ import sys
58
+
59
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llmeval-env/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (873 Bytes).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__pycache__/configuration_recurrent_gemma.cpython-310.pyc ADDED
Binary file (6.72 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__pycache__/convert_recurrent_gemma_to_hf.cpython-310.pyc ADDED
Binary file (5.66 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/recurrent_gemma/__pycache__/modeling_recurrent_gemma.cpython-310.pyc ADDED
Binary file (31.4 kB).