applied-ai-018 committed
Commit d35edd0 · verified · 1 parent: 308a910

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step40/zero/11.post_attention_layernorm.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/16.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step40/zero/16.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  4. venv/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/configuration_camembert.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/modeling_camembert.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/modeling_tf_camembert.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/tokenization_camembert.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/transformers/models/dpr/__init__.py +148 -0
  9. venv/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/__init__.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/configuration_dpr.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/convert_dpr_original_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/modeling_dpr.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/modeling_tf_dpr.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/tokenization_dpr.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/tokenization_dpr_fast.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/transformers/models/dpr/configuration_dpr.py +131 -0
  17. venv/lib/python3.10/site-packages/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py +143 -0
  18. venv/lib/python3.10/site-packages/transformers/models/dpr/modeling_dpr.py +663 -0
  19. venv/lib/python3.10/site-packages/transformers/models/dpr/modeling_tf_dpr.py +797 -0
  20. venv/lib/python3.10/site-packages/transformers/models/dpr/tokenization_dpr.py +319 -0
  21. venv/lib/python3.10/site-packages/transformers/models/dpr/tokenization_dpr_fast.py +319 -0
  22. venv/lib/python3.10/site-packages/transformers/models/efficientnet/__init__.py +84 -0
  23. venv/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/__init__.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/configuration_efficientnet.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/convert_efficientnet_to_pytorch.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/image_processing_efficientnet.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/modeling_efficientnet.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/transformers/models/efficientnet/configuration_efficientnet.py +169 -0
  29. venv/lib/python3.10/site-packages/transformers/models/efficientnet/convert_efficientnet_to_pytorch.py +339 -0
  30. venv/lib/python3.10/site-packages/transformers/models/efficientnet/image_processing_efficientnet.py +387 -0
  31. venv/lib/python3.10/site-packages/transformers/models/efficientnet/modeling_efficientnet.py +648 -0
  32. venv/lib/python3.10/site-packages/transformers/models/flaubert/__init__.py +103 -0
  33. venv/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/__init__.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/configuration_flaubert.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/modeling_flaubert.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/modeling_tf_flaubert.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/tokenization_flaubert.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/transformers/models/flaubert/configuration_flaubert.py +234 -0
  39. venv/lib/python3.10/site-packages/transformers/models/flaubert/modeling_flaubert.py +1302 -0
  40. venv/lib/python3.10/site-packages/transformers/models/flaubert/modeling_tf_flaubert.py +1337 -0
  41. venv/lib/python3.10/site-packages/transformers/models/flaubert/tokenization_flaubert.py +565 -0
  42. venv/lib/python3.10/site-packages/transformers/models/mvp/__init__.py +79 -0
  43. venv/lib/python3.10/site-packages/transformers/models/mvp/__pycache__/configuration_mvp.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/transformers/models/mvp/__pycache__/modeling_mvp.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/transformers/models/mvp/__pycache__/tokenization_mvp.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/transformers/models/mvp/__pycache__/tokenization_mvp_fast.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/transformers/models/mvp/configuration_mvp.py +179 -0
  48. venv/lib/python3.10/site-packages/transformers/models/mvp/modeling_mvp.py +2009 -0
  49. venv/lib/python3.10/site-packages/transformers/models/mvp/tokenization_mvp.py +391 -0
  50. venv/lib/python3.10/site-packages/transformers/models/mvp/tokenization_mvp_fast.py +279 -0
ckpts/universal/global_step40/zero/11.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1aa83f9216fa3b5096ef2ee94ca58f7942ad749237a0c2ea806d12b9a9c43c90
+ size 9372
ckpts/universal/global_step40/zero/16.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:230d66f0f800c64d7ad7e1e2e82adaab9b19f91c2b025cf90a3384f00beb7657
+ size 33555612
ckpts/universal/global_step40/zero/16.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c20f76bc1071fe1f85403339476a751b1f8db14e787d41b3cc11569d9f1e891
+ size 33555533
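The three `ckpts/...` entries above are Git LFS pointer files: only the `version`, `oid`, and `size` fields are committed, and the tensor payloads are fetched on an LFS pull. A minimal sketch of inspecting one of these optimizer-state shards once the LFS objects are present locally (the path comes from the diff above; whether the payload is a bare tensor or a small dict is an assumption, so the code checks):

```python
import torch

# LFS-tracked shard from this commit; run `git lfs pull` first so the pointer
# file is replaced by the real payload before loading.
path = "ckpts/universal/global_step40/zero/11.post_attention_layernorm.weight/exp_avg.pt"

state = torch.load(path, map_location="cpu")  # Adam first-moment (exp_avg) buffer
if hasattr(state, "shape"):
    print(state.shape, state.dtype)
else:
    print(type(state), list(state.keys()) if isinstance(state, dict) else state)
```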
venv/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/configuration_camembert.cpython-310.pyc ADDED
Binary file (6.7 kB).
 
venv/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/modeling_camembert.cpython-310.pyc ADDED
Binary file (45 kB).
 
venv/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/modeling_tf_camembert.cpython-310.pyc ADDED
Binary file (51.1 kB).
 
venv/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/tokenization_camembert.cpython-310.pyc ADDED
Binary file (11.8 kB).
 
venv/lib/python3.10/site-packages/transformers/models/dpr/__init__.py ADDED
@@ -0,0 +1,148 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_tf_available,
+     is_tokenizers_available,
+     is_torch_available,
+ )
+
+
+ _import_structure = {
+     "configuration_dpr": ["DPR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPRConfig"],
+     "tokenization_dpr": [
+         "DPRContextEncoderTokenizer",
+         "DPRQuestionEncoderTokenizer",
+         "DPRReaderOutput",
+         "DPRReaderTokenizer",
+     ],
+ }
+
+
+ try:
+     if not is_tokenizers_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_dpr_fast"] = [
+         "DPRContextEncoderTokenizerFast",
+         "DPRQuestionEncoderTokenizerFast",
+         "DPRReaderTokenizerFast",
+     ]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_dpr"] = [
+         "DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "DPRContextEncoder",
+         "DPRPretrainedContextEncoder",
+         "DPRPreTrainedModel",
+         "DPRPretrainedQuestionEncoder",
+         "DPRPretrainedReader",
+         "DPRQuestionEncoder",
+         "DPRReader",
+     ]
+
+ try:
+     if not is_tf_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_tf_dpr"] = [
+         "TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "TFDPRContextEncoder",
+         "TFDPRPretrainedContextEncoder",
+         "TFDPRPretrainedQuestionEncoder",
+         "TFDPRPretrainedReader",
+         "TFDPRQuestionEncoder",
+         "TFDPRReader",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_dpr import DPR_PRETRAINED_CONFIG_ARCHIVE_MAP, DPRConfig
+     from .tokenization_dpr import (
+         DPRContextEncoderTokenizer,
+         DPRQuestionEncoderTokenizer,
+         DPRReaderOutput,
+         DPRReaderTokenizer,
+     )
+
+     try:
+         if not is_tokenizers_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_dpr_fast import (
+             DPRContextEncoderTokenizerFast,
+             DPRQuestionEncoderTokenizerFast,
+             DPRReaderTokenizerFast,
+         )
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_dpr import (
+             DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
+             DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
+             DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
+             DPRContextEncoder,
+             DPRPretrainedContextEncoder,
+             DPRPreTrainedModel,
+             DPRPretrainedQuestionEncoder,
+             DPRPretrainedReader,
+             DPRQuestionEncoder,
+             DPRReader,
+         )
+
+     try:
+         if not is_tf_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_tf_dpr import (
+             TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
+             TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
+             TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
+             TFDPRContextEncoder,
+             TFDPRPretrainedContextEncoder,
+             TFDPRPretrainedQuestionEncoder,
+             TFDPRPretrainedReader,
+             TFDPRQuestionEncoder,
+             TFDPRReader,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
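The `__init__.py` above registers the DPR sub-package with `_LazyModule`, so `configuration_dpr`, `modeling_dpr`, and friends are only imported when one of the names in `_import_structure` is first accessed, and missing optional backends surface as `OptionalDependencyNotAvailable` instead of breaking `import transformers`. A rough sketch of the behaviour this enables (assumes `transformers` and `torch` are installed):

```python
import transformers  # cheap: no torch/TF model code is imported yet

# Attribute access triggers the lazy import of the sub-module that defines the name.
print(transformers.DPRConfig)          # resolved from dpr/configuration_dpr.py
print(transformers.DPRContextEncoder)  # resolved from dpr/modeling_dpr.py (needs torch)
```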
venv/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.18 kB).
 
venv/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/configuration_dpr.cpython-310.pyc ADDED
Binary file (5.8 kB).
 
venv/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/convert_dpr_original_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (4.95 kB).
 
venv/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/modeling_dpr.cpython-310.pyc ADDED
Binary file (22.1 kB).
 
venv/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/modeling_tf_dpr.cpython-310.pyc ADDED
Binary file (27.3 kB).
 
venv/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/tokenization_dpr.cpython-310.pyc ADDED
Binary file (13.2 kB).
 
venv/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/tokenization_dpr_fast.cpython-310.pyc ADDED
Binary file (13.6 kB).
 
venv/lib/python3.10/site-packages/transformers/models/dpr/configuration_dpr.py ADDED
@@ -0,0 +1,131 @@
+ # coding=utf-8
+ # Copyright 2010, DPR authors, The Hugging Face Team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ DPR model configuration"""
+
+ from ...configuration_utils import PretrainedConfig
+ from ...utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ from ..deprecated._archive_maps import DPR_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class DPRConfig(PretrainedConfig):
+     r"""
+     [`DPRConfig`] is the configuration class to store the configuration of a *DPRModel*.
+
+     This is the configuration class to store the configuration of a [`DPRContextEncoder`], [`DPRQuestionEncoder`], or a
+     [`DPRReader`]. It is used to instantiate the components of the DPR model according to the specified arguments,
+     defining the model component architectures. Instantiating a configuration with the defaults will yield a similar
+     configuration to that of the DPRContextEncoder
+     [facebook/dpr-ctx_encoder-single-nq-base](https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base)
+     architecture.
+
+     This class is a subclass of [`BertConfig`]. Please check the superclass for the documentation of all kwargs.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 30522):
+             Vocabulary size of the DPR model. Defines the different tokens that can be represented by the *inputs_ids*
+             passed to the forward method of [`BertModel`].
+         hidden_size (`int`, *optional*, defaults to 768):
+             Dimensionality of the encoder layers and the pooler layer.
+         num_hidden_layers (`int`, *optional*, defaults to 12):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 12):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         intermediate_size (`int`, *optional*, defaults to 3072):
+             Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"silu"` and `"gelu_new"` are supported.
+         hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+             The dropout ratio for the attention probabilities.
+         max_position_embeddings (`int`, *optional*, defaults to 512):
+             The maximum sequence length that this model might ever be used with. Typically set this to something large
+             just in case (e.g., 512 or 1024 or 2048).
+         type_vocab_size (`int`, *optional*, defaults to 2):
+             The vocabulary size of the *token_type_ids* passed into [`BertModel`].
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+             The epsilon used by the layer normalization layers.
+         pad_token_id (`int`, *optional*, defaults to 0):
+             Padding token id.
+         position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+             Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+             positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+             [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+             For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+             with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+         projection_dim (`int`, *optional*, defaults to 0):
+             Dimension of the projection for the context and question encoders. If it is set to zero (default), then no
+             projection is done.
+
+     Example:
+
+     ```python
+     >>> from transformers import DPRConfig, DPRContextEncoder
+
+     >>> # Initializing a DPR facebook/dpr-ctx_encoder-single-nq-base style configuration
+     >>> configuration = DPRConfig()
+
+     >>> # Initializing a model (with random weights) from the facebook/dpr-ctx_encoder-single-nq-base style configuration
+     >>> model = DPRContextEncoder(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "dpr"
+
+     def __init__(
+         self,
+         vocab_size=30522,
+         hidden_size=768,
+         num_hidden_layers=12,
+         num_attention_heads=12,
+         intermediate_size=3072,
+         hidden_act="gelu",
+         hidden_dropout_prob=0.1,
+         attention_probs_dropout_prob=0.1,
+         max_position_embeddings=512,
+         type_vocab_size=2,
+         initializer_range=0.02,
+         layer_norm_eps=1e-12,
+         pad_token_id=0,
+         position_embedding_type="absolute",
+         projection_dim: int = 0,
+         **kwargs,
+     ):
+         super().__init__(pad_token_id=pad_token_id, **kwargs)
+
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.hidden_act = hidden_act
+         self.intermediate_size = intermediate_size
+         self.hidden_dropout_prob = hidden_dropout_prob
+         self.attention_probs_dropout_prob = attention_probs_dropout_prob
+         self.max_position_embeddings = max_position_embeddings
+         self.type_vocab_size = type_vocab_size
+         self.initializer_range = initializer_range
+         self.layer_norm_eps = layer_norm_eps
+         self.projection_dim = projection_dim
+         self.position_embedding_type = position_embedding_type
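Relative to a plain BERT configuration, the DPR-specific argument is `projection_dim`: when it is set above zero, the `DPREncoder` defined in `modeling_dpr.py` below adds a linear projection on top of the [CLS] representation, and its `embeddings_size` property reports the projected width. A small sketch (randomly initialised weights, illustration only):

```python
from transformers import DPRConfig, DPRContextEncoder

config = DPRConfig(projection_dim=128)       # project the 768-d [CLS] output down to 128
encoder = DPRContextEncoder(config)

# embeddings_size follows the projection instead of hidden_size when projection_dim > 0
print(encoder.ctx_encoder.embeddings_size)   # 128
```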
venv/lib/python3.10/site-packages/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,143 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import argparse
+ import collections
+ from pathlib import Path
+
+ import torch
+ from torch.serialization import default_restore_location
+
+ from transformers import BertConfig, DPRConfig, DPRContextEncoder, DPRQuestionEncoder, DPRReader
+
+
+ CheckpointState = collections.namedtuple(
+     "CheckpointState", ["model_dict", "optimizer_dict", "scheduler_dict", "offset", "epoch", "encoder_params"]
+ )
+
+
+ def load_states_from_checkpoint(model_file: str) -> CheckpointState:
+     print(f"Reading saved model from {model_file}")
+     state_dict = torch.load(model_file, map_location=lambda s, l: default_restore_location(s, "cpu"))
+     return CheckpointState(**state_dict)
+
+
+ class DPRState:
+     def __init__(self, src_file: Path):
+         self.src_file = src_file
+
+     def load_dpr_model(self):
+         raise NotImplementedError
+
+     @staticmethod
+     def from_type(comp_type: str, *args, **kwargs) -> "DPRState":
+         if comp_type.startswith("c"):
+             return DPRContextEncoderState(*args, **kwargs)
+         if comp_type.startswith("q"):
+             return DPRQuestionEncoderState(*args, **kwargs)
+         if comp_type.startswith("r"):
+             return DPRReaderState(*args, **kwargs)
+         else:
+             raise ValueError("Component type must be either 'ctx_encoder', 'question_encoder' or 'reader'.")
+
+
+ class DPRContextEncoderState(DPRState):
+     def load_dpr_model(self):
+         model = DPRContextEncoder(DPRConfig(**BertConfig.get_config_dict("google-bert/bert-base-uncased")[0]))
+         print(f"Loading DPR biencoder from {self.src_file}")
+         saved_state = load_states_from_checkpoint(self.src_file)
+         encoder, prefix = model.ctx_encoder, "ctx_model."
+         # Fix changes from https://github.com/huggingface/transformers/commit/614fef1691edb806de976756d4948ecbcd0c0ca3
+         state_dict = {"bert_model.embeddings.position_ids": model.ctx_encoder.bert_model.embeddings.position_ids}
+         for key, value in saved_state.model_dict.items():
+             if key.startswith(prefix):
+                 key = key[len(prefix) :]
+                 if not key.startswith("encode_proj."):
+                     key = "bert_model." + key
+                 state_dict[key] = value
+         encoder.load_state_dict(state_dict)
+         return model
+
+
+ class DPRQuestionEncoderState(DPRState):
+     def load_dpr_model(self):
+         model = DPRQuestionEncoder(DPRConfig(**BertConfig.get_config_dict("google-bert/bert-base-uncased")[0]))
+         print(f"Loading DPR biencoder from {self.src_file}")
+         saved_state = load_states_from_checkpoint(self.src_file)
+         encoder, prefix = model.question_encoder, "question_model."
+         # Fix changes from https://github.com/huggingface/transformers/commit/614fef1691edb806de976756d4948ecbcd0c0ca3
+         state_dict = {"bert_model.embeddings.position_ids": model.question_encoder.bert_model.embeddings.position_ids}
+         for key, value in saved_state.model_dict.items():
+             if key.startswith(prefix):
+                 key = key[len(prefix) :]
+                 if not key.startswith("encode_proj."):
+                     key = "bert_model." + key
+                 state_dict[key] = value
+         encoder.load_state_dict(state_dict)
+         return model
+
+
+ class DPRReaderState(DPRState):
+     def load_dpr_model(self):
+         model = DPRReader(DPRConfig(**BertConfig.get_config_dict("google-bert/bert-base-uncased")[0]))
+         print(f"Loading DPR reader from {self.src_file}")
+         saved_state = load_states_from_checkpoint(self.src_file)
+         # Fix changes from https://github.com/huggingface/transformers/commit/614fef1691edb806de976756d4948ecbcd0c0ca3
+         state_dict = {
+             "encoder.bert_model.embeddings.position_ids": model.span_predictor.encoder.bert_model.embeddings.position_ids
+         }
+         for key, value in saved_state.model_dict.items():
+             if key.startswith("encoder.") and not key.startswith("encoder.encode_proj"):
+                 key = "encoder.bert_model." + key[len("encoder.") :]
+             state_dict[key] = value
+         model.span_predictor.load_state_dict(state_dict)
+         return model
+
+
+ def convert(comp_type: str, src_file: Path, dest_dir: Path):
+     dest_dir = Path(dest_dir)
+     dest_dir.mkdir(exist_ok=True)
+
+     dpr_state = DPRState.from_type(comp_type, src_file=src_file)
+     model = dpr_state.load_dpr_model()
+     model.save_pretrained(dest_dir)
+     model.from_pretrained(dest_dir)  # sanity check
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     # Required parameters
+     parser.add_argument(
+         "--type", type=str, help="Type of the component to convert: 'ctx_encoder', 'question_encoder' or 'reader'."
+     )
+     parser.add_argument(
+         "--src",
+         type=str,
+         help=(
+             "Path to the dpr checkpoint file. They can be downloaded from the official DPR repo"
+             " https://github.com/facebookresearch/DPR. Note that in the official repo, both encoders are stored in the"
+             " 'retriever' checkpoints."
+         ),
+     )
+     parser.add_argument("--dest", type=str, default=None, help="Path to the output PyTorch model directory.")
+     args = parser.parse_args()
+
+     src_file = Path(args.src)
+     dest_dir = f"converted-{src_file.name}" if args.dest is None else args.dest
+     dest_dir = Path(dest_dir)
+     assert src_file.exists()
+     assert (
+         args.type is not None
+     ), "Please specify the component type of the DPR model to convert: 'ctx_encoder', 'question_encoder' or 'reader'."
+     convert(args.type, src_file, dest_dir)
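The converter above maps a FAIR DPR `ctx_model.*` / `question_model.*` / `encoder.*` state dict onto the Hugging Face class layout and then calls `save_pretrained`. For reference, a sketch of driving it in-process rather than through `argparse` (the checkpoint and output paths are placeholders, not files from this commit):

```python
from pathlib import Path

# Requires torch; the module ships with transformers at this import path.
from transformers.models.dpr.convert_dpr_original_checkpoint_to_pytorch import convert

convert(
    comp_type="ctx_encoder",                     # or "question_encoder" / "reader"
    src_file=Path("dpr_biencoder.cp"),           # placeholder: checkpoint from facebookresearch/DPR
    dest_dir=Path("converted-dpr-ctx_encoder"),  # save_pretrained() output directory
)
```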
venv/lib/python3.10/site-packages/transformers/models/dpr/modeling_dpr.py ADDED
@@ -0,0 +1,663 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 DPR Authors, The Hugging Face Team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch DPR model for Open Domain Question Answering."""
16
+
17
+
18
+ from dataclasses import dataclass
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ from torch import Tensor, nn
23
+
24
+ from ...modeling_outputs import BaseModelOutputWithPooling
25
+ from ...modeling_utils import PreTrainedModel
26
+ from ...utils import (
27
+ ModelOutput,
28
+ add_start_docstrings,
29
+ add_start_docstrings_to_model_forward,
30
+ logging,
31
+ replace_return_docstrings,
32
+ )
33
+ from ..bert.modeling_bert import BertModel
34
+ from .configuration_dpr import DPRConfig
35
+
36
+
37
+ logger = logging.get_logger(__name__)
38
+
39
+ _CONFIG_FOR_DOC = "DPRConfig"
40
+ _CHECKPOINT_FOR_DOC = "facebook/dpr-ctx_encoder-single-nq-base"
41
+
42
+
43
+ from ..deprecated._archive_maps import ( # noqa: F401, E402
44
+ DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, # noqa: F401, E402
45
+ DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, # noqa: F401, E402
46
+ DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, # noqa: F401, E402
47
+ )
48
+
49
+
50
+ ##########
51
+ # Outputs
52
+ ##########
53
+
54
+
55
+ @dataclass
56
+ class DPRContextEncoderOutput(ModelOutput):
57
+ """
58
+ Class for outputs of [`DPRQuestionEncoder`].
59
+
60
+ Args:
61
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, embeddings_size)`):
62
+ The DPR encoder outputs the *pooler_output* that corresponds to the context representation. Last layer
63
+ hidden-state of the first token of the sequence (classification token) further processed by a Linear layer.
64
+ This output is to be used to embed contexts for nearest neighbors queries with questions embeddings.
65
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
66
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
67
+ shape `(batch_size, sequence_length, hidden_size)`.
68
+
69
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
70
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
71
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
72
+ sequence_length)`.
73
+
74
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
75
+ heads.
76
+ """
77
+
78
+ pooler_output: torch.FloatTensor
79
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
80
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
81
+
82
+
83
+ @dataclass
84
+ class DPRQuestionEncoderOutput(ModelOutput):
85
+ """
86
+ Class for outputs of [`DPRQuestionEncoder`].
87
+
88
+ Args:
89
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, embeddings_size)`):
90
+ The DPR encoder outputs the *pooler_output* that corresponds to the question representation. Last layer
91
+ hidden-state of the first token of the sequence (classification token) further processed by a Linear layer.
92
+ This output is to be used to embed questions for nearest neighbors queries with context embeddings.
93
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
94
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
95
+ shape `(batch_size, sequence_length, hidden_size)`.
96
+
97
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
98
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
99
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
100
+ sequence_length)`.
101
+
102
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
103
+ heads.
104
+ """
105
+
106
+ pooler_output: torch.FloatTensor
107
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
108
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
109
+
110
+
111
+ @dataclass
112
+ class DPRReaderOutput(ModelOutput):
113
+ """
114
+ Class for outputs of [`DPRQuestionEncoder`].
115
+
116
+ Args:
117
+ start_logits (`torch.FloatTensor` of shape `(n_passages, sequence_length)`):
118
+ Logits of the start index of the span for each passage.
119
+ end_logits (`torch.FloatTensor` of shape `(n_passages, sequence_length)`):
120
+ Logits of the end index of the span for each passage.
121
+ relevance_logits (`torch.FloatTensor` of shape `(n_passages, )`):
122
+ Outputs of the QA classifier of the DPRReader that corresponds to the scores of each passage to answer the
123
+ question, compared to all the other passages.
124
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
125
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
126
+ shape `(batch_size, sequence_length, hidden_size)`.
127
+
128
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
129
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
130
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
131
+ sequence_length)`.
132
+
133
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
134
+ heads.
135
+ """
136
+
137
+ start_logits: torch.FloatTensor
138
+ end_logits: torch.FloatTensor = None
139
+ relevance_logits: torch.FloatTensor = None
140
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
141
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
142
+
143
+
144
+ class DPRPreTrainedModel(PreTrainedModel):
145
+ def _init_weights(self, module):
146
+ """Initialize the weights"""
147
+ if isinstance(module, nn.Linear):
148
+ # Slightly different from the TF version which uses truncated_normal for initialization
149
+ # cf https://github.com/pytorch/pytorch/pull/5617
150
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
151
+ if module.bias is not None:
152
+ module.bias.data.zero_()
153
+ elif isinstance(module, nn.Embedding):
154
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
155
+ if module.padding_idx is not None:
156
+ module.weight.data[module.padding_idx].zero_()
157
+ elif isinstance(module, nn.LayerNorm):
158
+ module.bias.data.zero_()
159
+ module.weight.data.fill_(1.0)
160
+
161
+
162
+ class DPREncoder(DPRPreTrainedModel):
163
+ base_model_prefix = "bert_model"
164
+
165
+ def __init__(self, config: DPRConfig):
166
+ super().__init__(config)
167
+ self.bert_model = BertModel(config, add_pooling_layer=False)
168
+ if self.bert_model.config.hidden_size <= 0:
169
+ raise ValueError("Encoder hidden_size can't be zero")
170
+ self.projection_dim = config.projection_dim
171
+ if self.projection_dim > 0:
172
+ self.encode_proj = nn.Linear(self.bert_model.config.hidden_size, config.projection_dim)
173
+ # Initialize weights and apply final processing
174
+ self.post_init()
175
+
176
+ def forward(
177
+ self,
178
+ input_ids: Tensor,
179
+ attention_mask: Optional[Tensor] = None,
180
+ token_type_ids: Optional[Tensor] = None,
181
+ inputs_embeds: Optional[Tensor] = None,
182
+ output_attentions: bool = False,
183
+ output_hidden_states: bool = False,
184
+ return_dict: bool = False,
185
+ ) -> Union[BaseModelOutputWithPooling, Tuple[Tensor, ...]]:
186
+ outputs = self.bert_model(
187
+ input_ids=input_ids,
188
+ attention_mask=attention_mask,
189
+ token_type_ids=token_type_ids,
190
+ inputs_embeds=inputs_embeds,
191
+ output_attentions=output_attentions,
192
+ output_hidden_states=output_hidden_states,
193
+ return_dict=return_dict,
194
+ )
195
+ sequence_output = outputs[0]
196
+ pooled_output = sequence_output[:, 0, :]
197
+
198
+ if self.projection_dim > 0:
199
+ pooled_output = self.encode_proj(pooled_output)
200
+
201
+ if not return_dict:
202
+ return (sequence_output, pooled_output) + outputs[2:]
203
+
204
+ return BaseModelOutputWithPooling(
205
+ last_hidden_state=sequence_output,
206
+ pooler_output=pooled_output,
207
+ hidden_states=outputs.hidden_states,
208
+ attentions=outputs.attentions,
209
+ )
210
+
211
+ @property
212
+ def embeddings_size(self) -> int:
213
+ if self.projection_dim > 0:
214
+ return self.encode_proj.out_features
215
+ return self.bert_model.config.hidden_size
216
+
217
+
218
+ class DPRSpanPredictor(DPRPreTrainedModel):
219
+ base_model_prefix = "encoder"
220
+
221
+ def __init__(self, config: DPRConfig):
222
+ super().__init__(config)
223
+ self.encoder = DPREncoder(config)
224
+ self.qa_outputs = nn.Linear(self.encoder.embeddings_size, 2)
225
+ self.qa_classifier = nn.Linear(self.encoder.embeddings_size, 1)
226
+ # Initialize weights and apply final processing
227
+ self.post_init()
228
+
229
+ def forward(
230
+ self,
231
+ input_ids: Tensor,
232
+ attention_mask: Tensor,
233
+ inputs_embeds: Optional[Tensor] = None,
234
+ output_attentions: bool = False,
235
+ output_hidden_states: bool = False,
236
+ return_dict: bool = False,
237
+ ) -> Union[DPRReaderOutput, Tuple[Tensor, ...]]:
238
+ # notations: N - number of questions in a batch, M - number of passages per questions, L - sequence length
239
+ n_passages, sequence_length = input_ids.size() if input_ids is not None else inputs_embeds.size()[:2]
240
+ # feed encoder
241
+ outputs = self.encoder(
242
+ input_ids,
243
+ attention_mask=attention_mask,
244
+ inputs_embeds=inputs_embeds,
245
+ output_attentions=output_attentions,
246
+ output_hidden_states=output_hidden_states,
247
+ return_dict=return_dict,
248
+ )
249
+ sequence_output = outputs[0]
250
+
251
+ # compute logits
252
+ logits = self.qa_outputs(sequence_output)
253
+ start_logits, end_logits = logits.split(1, dim=-1)
254
+ start_logits = start_logits.squeeze(-1).contiguous()
255
+ end_logits = end_logits.squeeze(-1).contiguous()
256
+ relevance_logits = self.qa_classifier(sequence_output[:, 0, :])
257
+
258
+ # resize
259
+ start_logits = start_logits.view(n_passages, sequence_length)
260
+ end_logits = end_logits.view(n_passages, sequence_length)
261
+ relevance_logits = relevance_logits.view(n_passages)
262
+
263
+ if not return_dict:
264
+ return (start_logits, end_logits, relevance_logits) + outputs[2:]
265
+
266
+ return DPRReaderOutput(
267
+ start_logits=start_logits,
268
+ end_logits=end_logits,
269
+ relevance_logits=relevance_logits,
270
+ hidden_states=outputs.hidden_states,
271
+ attentions=outputs.attentions,
272
+ )
273
+
274
+
275
+ ##################
276
+ # PreTrainedModel
277
+ ##################
278
+
279
+
280
+ class DPRPretrainedContextEncoder(DPRPreTrainedModel):
281
+ """
282
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
283
+ models.
284
+ """
285
+
286
+ config_class = DPRConfig
287
+ load_tf_weights = None
288
+ base_model_prefix = "ctx_encoder"
289
+
290
+
291
+ class DPRPretrainedQuestionEncoder(DPRPreTrainedModel):
292
+ """
293
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
294
+ models.
295
+ """
296
+
297
+ config_class = DPRConfig
298
+ load_tf_weights = None
299
+ base_model_prefix = "question_encoder"
300
+
301
+
302
+ class DPRPretrainedReader(DPRPreTrainedModel):
303
+ """
304
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
305
+ models.
306
+ """
307
+
308
+ config_class = DPRConfig
309
+ load_tf_weights = None
310
+ base_model_prefix = "span_predictor"
311
+
312
+
313
+ ###############
314
+ # Actual Models
315
+ ###############
316
+
317
+
318
+ DPR_START_DOCSTRING = r"""
319
+
320
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
321
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
322
+ etc.)
323
+
324
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
325
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
326
+ and behavior.
327
+
328
+ Parameters:
329
+ config ([`DPRConfig`]): Model configuration class with all the parameters of the model.
330
+ Initializing with a config file does not load the weights associated with the model, only the
331
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
332
+ """
333
+
334
+ DPR_ENCODERS_INPUTS_DOCSTRING = r"""
335
+ Args:
336
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
337
+ Indices of input sequence tokens in the vocabulary. To match pretraining, DPR input sequence should be
338
+ formatted with [CLS] and [SEP] tokens as follows:
339
+
340
+ (a) For sequence pairs (for a pair title+text for example):
341
+
342
+ ```
343
+ tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
344
+ token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
345
+ ```
346
+
347
+ (b) For single sequences (for a question for example):
348
+
349
+ ```
350
+ tokens: [CLS] the dog is hairy . [SEP]
351
+ token_type_ids: 0 0 0 0 0 0 0
352
+ ```
353
+
354
+ DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right
355
+ rather than the left.
356
+
357
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
358
+ [`PreTrainedTokenizer.__call__`] for details.
359
+
360
+ [What are input IDs?](../glossary#input-ids)
361
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
362
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
363
+
364
+ - 1 for tokens that are **not masked**,
365
+ - 0 for tokens that are **masked**.
366
+
367
+ [What are attention masks?](../glossary#attention-mask)
368
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
369
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
370
+ 1]`:
371
+
372
+ - 0 corresponds to a *sentence A* token,
373
+ - 1 corresponds to a *sentence B* token.
374
+
375
+ [What are token type IDs?](../glossary#token-type-ids)
376
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
377
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
378
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
379
+ model's internal embedding lookup matrix.
380
+ output_attentions (`bool`, *optional*):
381
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
382
+ tensors for more detail.
383
+ output_hidden_states (`bool`, *optional*):
384
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
385
+ more detail.
386
+ return_dict (`bool`, *optional*):
387
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
388
+ """
389
+
390
+ DPR_READER_INPUTS_DOCSTRING = r"""
391
+ Args:
392
+ input_ids (`Tuple[torch.LongTensor]` of shapes `(n_passages, sequence_length)`):
393
+ Indices of input sequence tokens in the vocabulary. It has to be a sequence triplet with 1) the question
394
+ and 2) the passages titles and 3) the passages texts To match pretraining, DPR `input_ids` sequence should
395
+ be formatted with [CLS] and [SEP] with the format:
396
+
397
+ `[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>`
398
+
399
+ DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right
400
+ rather than the left.
401
+
402
+ Indices can be obtained using [`DPRReaderTokenizer`]. See this class documentation for more details.
403
+
404
+ [What are input IDs?](../glossary#input-ids)
405
+ attention_mask (`torch.FloatTensor` of shape `(n_passages, sequence_length)`, *optional*):
406
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
407
+
408
+ - 1 for tokens that are **not masked**,
409
+ - 0 for tokens that are **masked**.
410
+
411
+ [What are attention masks?](../glossary#attention-mask)
412
+ inputs_embeds (`torch.FloatTensor` of shape `(n_passages, sequence_length, hidden_size)`, *optional*):
413
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
414
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
415
+ model's internal embedding lookup matrix.
416
+ output_attentions (`bool`, *optional*):
417
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
418
+ tensors for more detail.
419
+ output_hidden_states (`bool`, *optional*):
420
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
421
+ more detail.
422
+ return_dict (`bool`, *optional*):
423
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
424
+ """
425
+
426
+
427
+ @add_start_docstrings(
428
+ "The bare DPRContextEncoder transformer outputting pooler outputs as context representations.",
429
+ DPR_START_DOCSTRING,
430
+ )
431
+ class DPRContextEncoder(DPRPretrainedContextEncoder):
432
+ def __init__(self, config: DPRConfig):
433
+ super().__init__(config)
434
+ self.config = config
435
+ self.ctx_encoder = DPREncoder(config)
436
+ # Initialize weights and apply final processing
437
+ self.post_init()
438
+
439
+ @add_start_docstrings_to_model_forward(DPR_ENCODERS_INPUTS_DOCSTRING)
440
+ @replace_return_docstrings(output_type=DPRContextEncoderOutput, config_class=_CONFIG_FOR_DOC)
441
+ def forward(
442
+ self,
443
+ input_ids: Optional[Tensor] = None,
444
+ attention_mask: Optional[Tensor] = None,
445
+ token_type_ids: Optional[Tensor] = None,
446
+ inputs_embeds: Optional[Tensor] = None,
447
+ output_attentions: Optional[bool] = None,
448
+ output_hidden_states: Optional[bool] = None,
449
+ return_dict: Optional[bool] = None,
450
+ ) -> Union[DPRContextEncoderOutput, Tuple[Tensor, ...]]:
451
+ r"""
452
+ Return:
453
+
454
+ Examples:
455
+
456
+ ```python
457
+ >>> from transformers import DPRContextEncoder, DPRContextEncoderTokenizer
458
+
459
+ >>> tokenizer = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
460
+ >>> model = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
461
+ >>> input_ids = tokenizer("Hello, is my dog cute ?", return_tensors="pt")["input_ids"]
462
+ >>> embeddings = model(input_ids).pooler_output
463
+ ```"""
464
+
465
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
466
+ output_hidden_states = (
467
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
468
+ )
469
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
470
+
471
+ if input_ids is not None and inputs_embeds is not None:
472
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
473
+ elif input_ids is not None:
474
+ input_shape = input_ids.size()
475
+ elif inputs_embeds is not None:
476
+ input_shape = inputs_embeds.size()[:-1]
477
+ else:
478
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
479
+
480
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
481
+
482
+ if attention_mask is None:
483
+ attention_mask = (
484
+ torch.ones(input_shape, device=device)
485
+ if input_ids is None
486
+ else (input_ids != self.config.pad_token_id)
487
+ )
488
+ if token_type_ids is None:
489
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
490
+
491
+ outputs = self.ctx_encoder(
492
+ input_ids=input_ids,
493
+ attention_mask=attention_mask,
494
+ token_type_ids=token_type_ids,
495
+ inputs_embeds=inputs_embeds,
496
+ output_attentions=output_attentions,
497
+ output_hidden_states=output_hidden_states,
498
+ return_dict=return_dict,
499
+ )
500
+
501
+ if not return_dict:
502
+ return outputs[1:]
503
+ return DPRContextEncoderOutput(
504
+ pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions
505
+ )
506
+
507
+
508
+ @add_start_docstrings(
509
+ "The bare DPRQuestionEncoder transformer outputting pooler outputs as question representations.",
510
+ DPR_START_DOCSTRING,
511
+ )
512
+ class DPRQuestionEncoder(DPRPretrainedQuestionEncoder):
513
+ def __init__(self, config: DPRConfig):
514
+ super().__init__(config)
515
+ self.config = config
516
+ self.question_encoder = DPREncoder(config)
517
+ # Initialize weights and apply final processing
518
+ self.post_init()
519
+
520
+ @add_start_docstrings_to_model_forward(DPR_ENCODERS_INPUTS_DOCSTRING)
521
+ @replace_return_docstrings(output_type=DPRQuestionEncoderOutput, config_class=_CONFIG_FOR_DOC)
522
+ def forward(
523
+ self,
524
+ input_ids: Optional[Tensor] = None,
525
+ attention_mask: Optional[Tensor] = None,
526
+ token_type_ids: Optional[Tensor] = None,
527
+ inputs_embeds: Optional[Tensor] = None,
528
+ output_attentions: Optional[bool] = None,
529
+ output_hidden_states: Optional[bool] = None,
530
+ return_dict: Optional[bool] = None,
531
+ ) -> Union[DPRQuestionEncoderOutput, Tuple[Tensor, ...]]:
532
+ r"""
533
+ Return:
534
+
535
+ Examples:
536
+
537
+ ```python
538
+ >>> from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer
539
+
540
+ >>> tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
541
+ >>> model = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
542
+ >>> input_ids = tokenizer("Hello, is my dog cute ?", return_tensors="pt")["input_ids"]
543
+ >>> embeddings = model(input_ids).pooler_output
544
+ ```
545
+ """
546
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
547
+ output_hidden_states = (
548
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
549
+ )
550
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
551
+
552
+ if input_ids is not None and inputs_embeds is not None:
553
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
554
+ elif input_ids is not None:
555
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
556
+ input_shape = input_ids.size()
557
+ elif inputs_embeds is not None:
558
+ input_shape = inputs_embeds.size()[:-1]
559
+ else:
560
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
561
+
562
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
563
+
564
+ if attention_mask is None:
565
+ attention_mask = (
566
+ torch.ones(input_shape, device=device)
567
+ if input_ids is None
568
+ else (input_ids != self.config.pad_token_id)
569
+ )
570
+ if token_type_ids is None:
571
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
572
+
573
+ outputs = self.question_encoder(
574
+ input_ids=input_ids,
575
+ attention_mask=attention_mask,
576
+ token_type_ids=token_type_ids,
577
+ inputs_embeds=inputs_embeds,
578
+ output_attentions=output_attentions,
579
+ output_hidden_states=output_hidden_states,
580
+ return_dict=return_dict,
581
+ )
582
+
583
+ if not return_dict:
584
+ return outputs[1:]
585
+ return DPRQuestionEncoderOutput(
586
+ pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions
587
+ )
588
+
589
+
590
+ @add_start_docstrings(
591
+ "The bare DPRReader transformer outputting span predictions.",
592
+ DPR_START_DOCSTRING,
593
+ )
594
+ class DPRReader(DPRPretrainedReader):
595
+ def __init__(self, config: DPRConfig):
596
+ super().__init__(config)
597
+ self.config = config
598
+ self.span_predictor = DPRSpanPredictor(config)
599
+ # Initialize weights and apply final processing
600
+ self.post_init()
601
+
602
+ @add_start_docstrings_to_model_forward(DPR_READER_INPUTS_DOCSTRING)
603
+ @replace_return_docstrings(output_type=DPRReaderOutput, config_class=_CONFIG_FOR_DOC)
604
+ def forward(
605
+ self,
606
+ input_ids: Optional[Tensor] = None,
607
+ attention_mask: Optional[Tensor] = None,
608
+ inputs_embeds: Optional[Tensor] = None,
609
+ output_attentions: Optional[bool] = None,
610
+ output_hidden_states: Optional[bool] = None,
611
+ return_dict: Optional[bool] = None,
612
+ ) -> Union[DPRReaderOutput, Tuple[Tensor, ...]]:
613
+ r"""
614
+ Return:
615
+
616
+ Examples:
617
+
618
+ ```python
619
+ >>> from transformers import DPRReader, DPRReaderTokenizer
620
+
621
+ >>> tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
622
+ >>> model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
623
+ >>> encoded_inputs = tokenizer(
624
+ ... questions=["What is love ?"],
625
+ ... titles=["Haddaway"],
626
+ ... texts=["'What Is Love' is a song recorded by the artist Haddaway"],
627
+ ... return_tensors="pt",
628
+ ... )
629
+ >>> outputs = model(**encoded_inputs)
630
+ >>> start_logits = outputs.start_logits
631
+ >>> end_logits = outputs.end_logits
632
+ >>> relevance_logits = outputs.relevance_logits
633
+ ```
634
+ """
635
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
636
+ output_hidden_states = (
637
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
638
+ )
639
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
640
+
641
+ if input_ids is not None and inputs_embeds is not None:
642
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
643
+ elif input_ids is not None:
644
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
645
+ input_shape = input_ids.size()
646
+ elif inputs_embeds is not None:
647
+ input_shape = inputs_embeds.size()[:-1]
648
+ else:
649
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
650
+
651
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
652
+
653
+ if attention_mask is None:
654
+ attention_mask = torch.ones(input_shape, device=device)
655
+
656
+ return self.span_predictor(
657
+ input_ids,
658
+ attention_mask,
659
+ inputs_embeds=inputs_embeds,
660
+ output_attentions=output_attentions,
661
+ output_hidden_states=output_hidden_states,
662
+ return_dict=return_dict,
663
+ )
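`modeling_dpr.py` provides the three components of the retrieval pipeline: the context and question encoders each produce a single `pooler_output` vector, relevance is scored by their inner product, and `DPRReader` extracts answer spans from retrieved passages. A minimal retrieval sketch combining the two encoders, following the docstring examples above (downloads the pretrained `facebook/dpr-*-single-nq-base` checkpoints):

```python
import torch
from transformers import (
    DPRContextEncoder,
    DPRContextEncoderTokenizer,
    DPRQuestionEncoder,
    DPRQuestionEncoderTokenizer,
)

q_tok = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
q_enc = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
c_tok = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
c_enc = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")

question = "Who recorded the song 'What Is Love'?"
passages = [
    "'What Is Love' is a song recorded by the artist Haddaway.",
    "The dog is hairy.",
]

with torch.no_grad():
    q_emb = q_enc(**q_tok(question, return_tensors="pt")).pooler_output                # (1, 768)
    c_emb = c_enc(**c_tok(passages, padding=True, return_tensors="pt")).pooler_output  # (2, 768)

scores = q_emb @ c_emb.T  # inner-product relevance; the first passage should score higher
print(scores)
```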
venv/lib/python3.10/site-packages/transformers/models/dpr/modeling_tf_dpr.py ADDED
@@ -0,0 +1,797 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 DPR Authors, The Hugging Face Team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """ TensorFlow DPR model for Open Domain Question Answering."""
17
+
18
+ from __future__ import annotations
19
+
20
+ from dataclasses import dataclass
21
+ from typing import Tuple, Union
22
+
23
+ import tensorflow as tf
24
+
25
+ from ...modeling_tf_outputs import TFBaseModelOutputWithPooling
26
+ from ...modeling_tf_utils import TFModelInputType, TFPreTrainedModel, get_initializer, keras, shape_list, unpack_inputs
27
+ from ...utils import (
28
+ ModelOutput,
29
+ add_start_docstrings,
30
+ add_start_docstrings_to_model_forward,
31
+ logging,
32
+ replace_return_docstrings,
33
+ )
34
+ from ..bert.modeling_tf_bert import TFBertMainLayer
35
+ from .configuration_dpr import DPRConfig
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+ _CONFIG_FOR_DOC = "DPRConfig"
41
+
42
+
43
+ from ..deprecated._archive_maps import ( # noqa: F401, E402
44
+ TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, # noqa: F401, E402
45
+ TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, # noqa: F401, E402
46
+ TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, # noqa: F401, E402
47
+ )
48
+
49
+
50
+ ##########
51
+ # Outputs
52
+ ##########
53
+
54
+
55
+ @dataclass
56
+ class TFDPRContextEncoderOutput(ModelOutput):
57
+ r"""
58
+ Class for outputs of [`TFDPRContextEncoder`].
59
+
60
+ Args:
61
+ pooler_output (`tf.Tensor` of shape `(batch_size, embeddings_size)`):
62
+ The DPR encoder outputs the *pooler_output* that corresponds to the context representation. Last layer
63
+ hidden-state of the first token of the sequence (classification token) further processed by a Linear layer.
64
+ This output is to be used to embed contexts for nearest-neighbor queries with question embeddings.
65
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
66
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
67
+ `(batch_size, sequence_length, hidden_size)`.
68
+
69
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
70
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
71
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
72
+ sequence_length)`.
73
+
74
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
75
+ heads.
76
+ """
77
+
78
+ pooler_output: tf.Tensor = None
79
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
80
+ attentions: Tuple[tf.Tensor, ...] | None = None
81
+
82
+
83
+ @dataclass
84
+ class TFDPRQuestionEncoderOutput(ModelOutput):
85
+ """
86
+ Class for outputs of [`TFDPRQuestionEncoder`].
87
+
88
+ Args:
89
+ pooler_output (`tf.Tensor` of shape `(batch_size, embeddings_size)`):
90
+ The DPR encoder outputs the *pooler_output* that corresponds to the question representation. Last layer
91
+ hidden-state of the first token of the sequence (classification token) further processed by a Linear layer.
92
+ This output is to be used to embed questions for nearest-neighbor queries with context embeddings.
93
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
94
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
95
+ `(batch_size, sequence_length, hidden_size)`.
96
+
97
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
98
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
99
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
100
+ sequence_length)`.
101
+
102
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
103
+ heads.
104
+ """
105
+
106
+ pooler_output: tf.Tensor = None
107
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
108
+ attentions: Tuple[tf.Tensor, ...] | None = None
109
+
110
+
111
+ @dataclass
112
+ class TFDPRReaderOutput(ModelOutput):
113
+ """
114
+ Class for outputs of [`TFDPRReaderEncoder`].
115
+
116
+ Args:
117
+ start_logits (`tf.Tensor` of shape `(n_passages, sequence_length)`):
118
+ Logits of the start index of the span for each passage.
119
+ end_logits (`tf.Tensor` of shape `(n_passages, sequence_length)`):
120
+ Logits of the end index of the span for each passage.
121
+ relevance_logits (`tf.Tensor` of shape `(n_passages, )`):
122
+ Outputs of the QA classifier of the DPRReader that correspond to the scores of each passage to answer the
123
+ question, compared to all the other passages.
124
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
125
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
126
+ `(batch_size, sequence_length, hidden_size)`.
127
+
128
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
129
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
130
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
131
+ sequence_length)`.
132
+
133
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
134
+ heads.
135
+ """
136
+
137
+ start_logits: tf.Tensor = None
138
+ end_logits: tf.Tensor = None
139
+ relevance_logits: tf.Tensor = None
140
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
141
+ attentions: Tuple[tf.Tensor, ...] | None = None
142
+
143
+
144
+ class TFDPREncoderLayer(keras.layers.Layer):
145
+ base_model_prefix = "bert_model"
146
+
147
+ def __init__(self, config: DPRConfig, **kwargs):
148
+ super().__init__(**kwargs)
149
+
150
+ # resolve name conflict with TFBertMainLayer instead of TFBertModel
151
+ self.bert_model = TFBertMainLayer(config, add_pooling_layer=False, name="bert_model")
152
+ self.config = config
153
+
154
+ if self.config.hidden_size <= 0:
155
+ raise ValueError("Encoder hidden_size can't be zero")
156
+ self.projection_dim = config.projection_dim
157
+ if self.projection_dim > 0:
158
+ self.encode_proj = keras.layers.Dense(
159
+ config.projection_dim, kernel_initializer=get_initializer(config.initializer_range), name="encode_proj"
160
+ )
161
+
162
+ @unpack_inputs
163
+ def call(
164
+ self,
165
+ input_ids: tf.Tensor = None,
166
+ attention_mask: tf.Tensor | None = None,
167
+ token_type_ids: tf.Tensor | None = None,
168
+ inputs_embeds: tf.Tensor | None = None,
169
+ output_attentions: bool = None,
170
+ output_hidden_states: bool = None,
171
+ return_dict: bool = None,
172
+ training: bool = False,
173
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor, ...]]:
174
+ outputs = self.bert_model(
175
+ input_ids=input_ids,
176
+ attention_mask=attention_mask,
177
+ token_type_ids=token_type_ids,
178
+ inputs_embeds=inputs_embeds,
179
+ output_attentions=output_attentions,
180
+ output_hidden_states=output_hidden_states,
181
+ return_dict=return_dict,
182
+ training=training,
183
+ )
184
+
185
+ sequence_output = outputs[0]
186
+ pooled_output = sequence_output[:, 0, :]
187
+ if self.projection_dim > 0:
188
+ pooled_output = self.encode_proj(pooled_output)
189
+
190
+ if not return_dict:
191
+ return (sequence_output, pooled_output) + outputs[1:]
192
+
193
+ return TFBaseModelOutputWithPooling(
194
+ last_hidden_state=sequence_output,
195
+ pooler_output=pooled_output,
196
+ hidden_states=outputs.hidden_states,
197
+ attentions=outputs.attentions,
198
+ )
199
+
200
+ @property
201
+ def embeddings_size(self) -> int:
202
+ if self.projection_dim > 0:
203
+ return self.projection_dim
204
+ return self.bert_model.config.hidden_size
205
+
206
+ def build(self, input_shape=None):
207
+ if self.built:
208
+ return
209
+ self.built = True
210
+ if getattr(self, "bert_model", None) is not None:
211
+ with tf.name_scope(self.bert_model.name):
212
+ self.bert_model.build(None)
213
+ if getattr(self, "encode_proj", None) is not None:
214
+ with tf.name_scope(self.encode_proj.name):
215
+ self.encode_proj.build(None)
216
+
217
+
218
+ class TFDPRSpanPredictorLayer(keras.layers.Layer):
219
+ base_model_prefix = "encoder"
220
+
221
+ def __init__(self, config: DPRConfig, **kwargs):
222
+ super().__init__(**kwargs)
223
+ self.config = config
224
+ self.encoder = TFDPREncoderLayer(config, name="encoder")
225
+
226
+ self.qa_outputs = keras.layers.Dense(
227
+ 2, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
228
+ )
229
+ self.qa_classifier = keras.layers.Dense(
230
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="qa_classifier"
231
+ )
232
+
233
+ @unpack_inputs
234
+ def call(
235
+ self,
236
+ input_ids: tf.Tensor = None,
237
+ attention_mask: tf.Tensor | None = None,
238
+ inputs_embeds: tf.Tensor | None = None,
239
+ output_attentions: bool = False,
240
+ output_hidden_states: bool = False,
241
+ return_dict: bool = False,
242
+ training: bool = False,
243
+ ) -> Union[TFDPRReaderOutput, Tuple[tf.Tensor, ...]]:
244
+ # notations: N - number of questions in a batch, M - number of passages per questions, L - sequence length
245
+ n_passages, sequence_length = shape_list(input_ids) if input_ids is not None else shape_list(inputs_embeds)[:2]
246
+ # feed encoder
247
+ outputs = self.encoder(
248
+ input_ids=input_ids,
249
+ attention_mask=attention_mask,
250
+ inputs_embeds=inputs_embeds,
251
+ output_attentions=output_attentions,
252
+ output_hidden_states=output_hidden_states,
253
+ return_dict=return_dict,
254
+ training=training,
255
+ )
256
+ sequence_output = outputs[0]
257
+
258
+ # compute logits
259
+ logits = self.qa_outputs(sequence_output)
260
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
261
+ start_logits = tf.squeeze(start_logits, axis=-1)
262
+ end_logits = tf.squeeze(end_logits, axis=-1)
263
+ relevance_logits = self.qa_classifier(sequence_output[:, 0, :])
264
+
265
+ # resize
266
+ start_logits = tf.reshape(start_logits, [n_passages, sequence_length])
267
+ end_logits = tf.reshape(end_logits, [n_passages, sequence_length])
268
+ relevance_logits = tf.reshape(relevance_logits, [n_passages])
269
+
270
+ if not return_dict:
271
+ return (start_logits, end_logits, relevance_logits) + outputs[2:]
272
+
273
+ return TFDPRReaderOutput(
274
+ start_logits=start_logits,
275
+ end_logits=end_logits,
276
+ relevance_logits=relevance_logits,
277
+ hidden_states=outputs.hidden_states,
278
+ attentions=outputs.attentions,
279
+ )
280
+
281
+ def build(self, input_shape=None):
282
+ if self.built:
283
+ return
284
+ self.built = True
285
+ if getattr(self, "encoder", None) is not None:
286
+ with tf.name_scope(self.encoder.name):
287
+ self.encoder.build(None)
288
+ if getattr(self, "qa_outputs", None) is not None:
289
+ with tf.name_scope(self.qa_outputs.name):
290
+ self.qa_outputs.build([None, None, self.encoder.embeddings_size])
291
+ if getattr(self, "qa_classifier", None) is not None:
292
+ with tf.name_scope(self.qa_classifier.name):
293
+ self.qa_classifier.build([None, None, self.encoder.embeddings_size])
294
+
295
+
296
+ class TFDPRSpanPredictor(TFPreTrainedModel):
297
+ base_model_prefix = "encoder"
298
+
299
+ def __init__(self, config: DPRConfig, **kwargs):
300
+ super().__init__(config, **kwargs)
301
+ self.encoder = TFDPRSpanPredictorLayer(config)
302
+
303
+ @unpack_inputs
304
+ def call(
305
+ self,
306
+ input_ids: tf.Tensor = None,
307
+ attention_mask: tf.Tensor | None = None,
308
+ token_type_ids: tf.Tensor | None = None,
309
+ inputs_embeds: tf.Tensor | None = None,
310
+ output_attentions: bool = False,
311
+ output_hidden_states: bool = False,
312
+ return_dict: bool = False,
313
+ training: bool = False,
314
+ ) -> Union[TFDPRReaderOutput, Tuple[tf.Tensor, ...]]:
315
+ outputs = self.encoder(
316
+ input_ids=input_ids,
317
+ attention_mask=attention_mask,
318
+ inputs_embeds=inputs_embeds,
319
+ output_attentions=output_attentions,
320
+ output_hidden_states=output_hidden_states,
321
+ return_dict=return_dict,
322
+ training=training,
323
+ )
324
+
325
+ return outputs
326
+
327
+
328
+ class TFDPREncoder(TFPreTrainedModel):
329
+ base_model_prefix = "encoder"
330
+
331
+ def __init__(self, config: DPRConfig, **kwargs):
332
+ super().__init__(config, **kwargs)
333
+
334
+ self.encoder = TFDPREncoderLayer(config)
335
+
336
+ @unpack_inputs
337
+ def call(
338
+ self,
339
+ input_ids: tf.Tensor = None,
340
+ attention_mask: tf.Tensor | None = None,
341
+ token_type_ids: tf.Tensor | None = None,
342
+ inputs_embeds: tf.Tensor | None = None,
343
+ output_attentions: bool = False,
344
+ output_hidden_states: bool = False,
345
+ return_dict: bool = False,
346
+ training: bool = False,
347
+ ) -> Union[TFDPRReaderOutput, Tuple[tf.Tensor, ...]]:
348
+ outputs = self.encoder(
349
+ input_ids=input_ids,
350
+ attention_mask=attention_mask,
351
+ inputs_embeds=inputs_embeds,
352
+ output_attentions=output_attentions,
353
+ output_hidden_states=output_hidden_states,
354
+ return_dict=return_dict,
355
+ training=training,
356
+ )
357
+ return outputs
358
+
359
+
360
+ ##################
361
+ # PreTrainedModel
362
+ ##################
363
+
364
+
365
+ class TFDPRPretrainedContextEncoder(TFPreTrainedModel):
366
+ """
367
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
368
+ models.
369
+ """
370
+
371
+ config_class = DPRConfig
372
+ base_model_prefix = "ctx_encoder"
373
+
374
+
375
+ class TFDPRPretrainedQuestionEncoder(TFPreTrainedModel):
376
+ """
377
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
378
+ models.
379
+ """
380
+
381
+ config_class = DPRConfig
382
+ base_model_prefix = "question_encoder"
383
+
384
+
385
+ class TFDPRPretrainedReader(TFPreTrainedModel):
386
+ """
387
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
388
+ models.
389
+ """
390
+
391
+ config_class = DPRConfig
392
+ base_model_prefix = "reader"
393
+
394
+
395
+ ###############
396
+ # Actual Models
397
+ ###############
398
+
399
+
400
+ TF_DPR_START_DOCSTRING = r"""
401
+
402
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
403
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
404
+ etc.)
405
+
406
+ This model is also a Tensorflow [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)
407
+ subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to
408
+ general usage and behavior.
409
+
410
+ <Tip>
411
+
412
+ TensorFlow models and layers in `transformers` accept two formats as input:
413
+
414
+ - having all inputs as keyword arguments (like PyTorch models), or
415
+ - having all inputs as a list, tuple or dict in the first positional argument.
416
+
417
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
418
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
419
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
420
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
421
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
422
+ positional argument:
423
+
424
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
425
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
426
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
427
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
428
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
429
+
430
+ Note that when creating models and layers with
431
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
432
+ about any of this, as you can just pass inputs like you would to any other Python function!
433
+
434
+ </Tip>
435
+
436
+ Parameters:
437
+ config ([`DPRConfig`]): Model configuration class with all the parameters of the model.
438
+ Initializing with a config file does not load the weights associated with the model, only the
439
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
440
+ """
441
+
442
+ TF_DPR_ENCODERS_INPUTS_DOCSTRING = r"""
443
+ Args:
444
+ input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`):
445
+ Indices of input sequence tokens in the vocabulary. To match pretraining, DPR input sequence should be
446
+ formatted with [CLS] and [SEP] tokens as follows:
447
+
448
+ (a) For sequence pairs (for a pair title+text for example):
449
+
450
+ ```
451
+ tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
452
+ token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
453
+ ```
454
+
455
+ (b) For single sequences (for a question for example):
456
+
457
+ ```
458
+ tokens: [CLS] the dog is hairy . [SEP]
459
+ token_type_ids: 0 0 0 0 0 0 0
460
+ ```
461
+
462
+ DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right
463
+ rather than the left.
464
+
465
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
466
+ [`PreTrainedTokenizer.__call__`] for details.
467
+
468
+ [What are input IDs?](../glossary#input-ids)
469
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
470
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
471
+
472
+ - 1 for tokens that are **not masked**,
473
+ - 0 for tokens that are **masked**.
474
+
475
+ [What are attention masks?](../glossary#attention-mask)
476
+ token_type_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
477
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
478
+ 1]`:
479
+
480
+ - 0 corresponds to a *sentence A* token,
481
+ - 1 corresponds to a *sentence B* token.
482
+
483
+ [What are token type IDs?](../glossary#token-type-ids)
484
+ inputs_embeds (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
485
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
486
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
487
+ model's internal embedding lookup matrix.
488
+ output_attentions (`bool`, *optional*):
489
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
490
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
491
+ config will be used instead.
492
+ output_hidden_states (`bool`, *optional*):
493
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
494
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
495
+ used instead.
496
+ return_dict (`bool`, *optional*):
497
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
498
+ eager mode, in graph mode the value will always be set to True.
499
+ training (`bool`, *optional*, defaults to `False`):
500
+ Whether or not to use the model in training mode (some modules like dropout modules have different
501
+ behaviors between training and evaluation).
502
+ """
503
+
504
+ TF_DPR_READER_INPUTS_DOCSTRING = r"""
505
+ Args:
506
+ input_ids (`Numpy array` or `tf.Tensor` of shape `(n_passages, sequence_length)`):
507
+ Indices of input sequence tokens in the vocabulary. It has to be a sequence triplet with 1) the question
508
+ and 2) the passage titles and 3) the passage texts. To match pretraining, DPR `input_ids` sequence should
509
+ be formatted with [CLS] and [SEP] with the format:
510
+
511
+ `[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>`
512
+
513
+ DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right
514
+ rather than the left.
515
+
516
+ Indices can be obtained using [`DPRReaderTokenizer`]. See this class documentation for more details.
517
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `(n_passages, sequence_length)`, *optional*):
518
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
519
+
520
+ - 1 for tokens that are **not masked**,
521
+ - 0 for tokens that are **masked**.
522
+
523
+ [What are attention masks?](../glossary#attention-mask)
524
+ inputs_embeds (`Numpy array` or `tf.Tensor` of shape `(n_passages, sequence_length, hidden_size)`, *optional*):
525
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
526
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
527
+ model's internal embedding lookup matrix.
528
+ output_hidden_states (`bool`, *optional*):
529
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
530
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
531
+ used instead.
532
+ return_dict (`bool`, *optional*):
533
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
534
+ eager mode, in graph mode the value will always be set to True.
535
+ training (`bool`, *optional*, defaults to `False`):
536
+ Whether or not to use the model in training mode (some modules like dropout modules have different
537
+ behaviors between training and evaluation).
538
+ """
539
+
540
+
541
+ @add_start_docstrings(
542
+ "The bare DPRContextEncoder transformer outputting pooler outputs as context representations.",
543
+ TF_DPR_START_DOCSTRING,
544
+ )
545
+ class TFDPRContextEncoder(TFDPRPretrainedContextEncoder):
546
+ def __init__(self, config: DPRConfig, *args, **kwargs):
547
+ super().__init__(config, *args, **kwargs)
548
+ self.ctx_encoder = TFDPREncoderLayer(config, name="ctx_encoder")
549
+
550
+ def get_input_embeddings(self):
551
+ try:
552
+ return self.ctx_encoder.bert_model.get_input_embeddings()
553
+ except AttributeError:
554
+ self.build()
555
+ return self.ctx_encoder.bert_model.get_input_embeddings()
556
+
557
+ @unpack_inputs
558
+ @add_start_docstrings_to_model_forward(TF_DPR_ENCODERS_INPUTS_DOCSTRING)
559
+ @replace_return_docstrings(output_type=TFDPRContextEncoderOutput, config_class=_CONFIG_FOR_DOC)
560
+ def call(
561
+ self,
562
+ input_ids: TFModelInputType | None = None,
563
+ attention_mask: tf.Tensor | None = None,
564
+ token_type_ids: tf.Tensor | None = None,
565
+ inputs_embeds: tf.Tensor | None = None,
566
+ output_attentions: bool | None = None,
567
+ output_hidden_states: bool | None = None,
568
+ return_dict: bool | None = None,
569
+ training: bool = False,
570
+ ) -> TFDPRContextEncoderOutput | Tuple[tf.Tensor, ...]:
571
+ r"""
572
+ Return:
573
+
574
+ Examples:
575
+
576
+ ```python
577
+ >>> from transformers import TFDPRContextEncoder, DPRContextEncoderTokenizer
578
+
579
+ >>> tokenizer = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
580
+ >>> model = TFDPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", from_pt=True)
581
+ >>> input_ids = tokenizer("Hello, is my dog cute ?", return_tensors="tf")["input_ids"]
582
+ >>> embeddings = model(input_ids).pooler_output
583
+ ```
584
+ """
585
+ if input_ids is not None and inputs_embeds is not None:
586
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
587
+ elif input_ids is not None:
588
+ input_shape = shape_list(input_ids)
589
+ elif inputs_embeds is not None:
590
+ input_shape = shape_list(inputs_embeds)[:-1]
591
+ else:
592
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
593
+
594
+ if attention_mask is None:
595
+ attention_mask = (
596
+ tf.ones(input_shape, dtype=tf.dtypes.int32)
597
+ if input_ids is None
598
+ else (input_ids != self.config.pad_token_id)
599
+ )
600
+ if token_type_ids is None:
601
+ token_type_ids = tf.zeros(input_shape, dtype=tf.dtypes.int32)
602
+
603
+ outputs = self.ctx_encoder(
604
+ input_ids=input_ids,
605
+ attention_mask=attention_mask,
606
+ token_type_ids=token_type_ids,
607
+ inputs_embeds=inputs_embeds,
608
+ output_attentions=output_attentions,
609
+ output_hidden_states=output_hidden_states,
610
+ return_dict=return_dict,
611
+ training=training,
612
+ )
613
+
614
+ if not return_dict:
615
+ return outputs[1:]
616
+
617
+ return TFDPRContextEncoderOutput(
618
+ pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions
619
+ )
620
+
621
+ def build(self, input_shape=None):
622
+ if self.built:
623
+ return
624
+ self.built = True
625
+ if getattr(self, "ctx_encoder", None) is not None:
626
+ with tf.name_scope(self.ctx_encoder.name):
627
+ self.ctx_encoder.build(None)
628
+
629
+
630
+ @add_start_docstrings(
631
+ "The bare DPRQuestionEncoder transformer outputting pooler outputs as question representations.",
632
+ TF_DPR_START_DOCSTRING,
633
+ )
634
+ class TFDPRQuestionEncoder(TFDPRPretrainedQuestionEncoder):
635
+ def __init__(self, config: DPRConfig, *args, **kwargs):
636
+ super().__init__(config, *args, **kwargs)
637
+ self.question_encoder = TFDPREncoderLayer(config, name="question_encoder")
638
+
639
+ def get_input_embeddings(self):
640
+ try:
641
+ return self.question_encoder.bert_model.get_input_embeddings()
642
+ except AttributeError:
643
+ self.build()
644
+ return self.question_encoder.bert_model.get_input_embeddings()
645
+
646
+ @unpack_inputs
647
+ @add_start_docstrings_to_model_forward(TF_DPR_ENCODERS_INPUTS_DOCSTRING)
648
+ @replace_return_docstrings(output_type=TFDPRQuestionEncoderOutput, config_class=_CONFIG_FOR_DOC)
649
+ def call(
650
+ self,
651
+ input_ids: TFModelInputType | None = None,
652
+ attention_mask: tf.Tensor | None = None,
653
+ token_type_ids: tf.Tensor | None = None,
654
+ inputs_embeds: tf.Tensor | None = None,
655
+ output_attentions: bool | None = None,
656
+ output_hidden_states: bool | None = None,
657
+ return_dict: bool | None = None,
658
+ training: bool = False,
659
+ ) -> TFDPRQuestionEncoderOutput | Tuple[tf.Tensor, ...]:
660
+ r"""
661
+ Return:
662
+
663
+ Examples:
664
+
665
+ ```python
666
+ >>> from transformers import TFDPRQuestionEncoder, DPRQuestionEncoderTokenizer
667
+
668
+ >>> tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
669
+ >>> model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base", from_pt=True)
670
+ >>> input_ids = tokenizer("Hello, is my dog cute ?", return_tensors="tf")["input_ids"]
671
+ >>> embeddings = model(input_ids).pooler_output
672
+ ```
673
+ """
674
+ if input_ids is not None and inputs_embeds is not None:
675
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
676
+ elif input_ids is not None:
677
+ input_shape = shape_list(input_ids)
678
+ elif inputs_embeds is not None:
679
+ input_shape = shape_list(inputs_embeds)[:-1]
680
+ else:
681
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
682
+
683
+ if attention_mask is None:
684
+ attention_mask = (
685
+ tf.ones(input_shape, dtype=tf.dtypes.int32)
686
+ if input_ids is None
687
+ else (input_ids != self.config.pad_token_id)
688
+ )
689
+ if token_type_ids is None:
690
+ token_type_ids = tf.zeros(input_shape, dtype=tf.dtypes.int32)
691
+
692
+ outputs = self.question_encoder(
693
+ input_ids=input_ids,
694
+ attention_mask=attention_mask,
695
+ token_type_ids=token_type_ids,
696
+ inputs_embeds=inputs_embeds,
697
+ output_attentions=output_attentions,
698
+ output_hidden_states=output_hidden_states,
699
+ return_dict=return_dict,
700
+ training=training,
701
+ )
702
+
703
+ if not return_dict:
704
+ return outputs[1:]
705
+ return TFDPRQuestionEncoderOutput(
706
+ pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions
707
+ )
708
+
709
+ def build(self, input_shape=None):
710
+ if self.built:
711
+ return
712
+ self.built = True
713
+ if getattr(self, "question_encoder", None) is not None:
714
+ with tf.name_scope(self.question_encoder.name):
715
+ self.question_encoder.build(None)
716
+
717
+
718
+ @add_start_docstrings(
719
+ "The bare DPRReader transformer outputting span predictions.",
720
+ TF_DPR_START_DOCSTRING,
721
+ )
722
+ class TFDPRReader(TFDPRPretrainedReader):
723
+ def __init__(self, config: DPRConfig, *args, **kwargs):
724
+ super().__init__(config, *args, **kwargs)
725
+ self.span_predictor = TFDPRSpanPredictorLayer(config, name="span_predictor")
726
+
727
+ def get_input_embeddings(self):
728
+ try:
729
+ return self.span_predictor.encoder.bert_model.get_input_embeddings()
730
+ except AttributeError:
731
+ self.build()
732
+ return self.span_predictor.encoder.bert_model.get_input_embeddings()
733
+
734
+ @unpack_inputs
735
+ @add_start_docstrings_to_model_forward(TF_DPR_READER_INPUTS_DOCSTRING)
736
+ @replace_return_docstrings(output_type=TFDPRReaderOutput, config_class=_CONFIG_FOR_DOC)
737
+ def call(
738
+ self,
739
+ input_ids: TFModelInputType | None = None,
740
+ attention_mask: tf.Tensor | None = None,
741
+ inputs_embeds: tf.Tensor | None = None,
742
+ output_attentions: bool | None = None,
743
+ output_hidden_states: bool | None = None,
744
+ return_dict: bool | None = None,
745
+ training: bool = False,
746
+ ) -> TFDPRReaderOutput | Tuple[tf.Tensor, ...]:
747
+ r"""
748
+ Return:
749
+
750
+ Examples:
751
+
752
+ ```python
753
+ >>> from transformers import TFDPRReader, DPRReaderTokenizer
754
+
755
+ >>> tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
756
+ >>> model = TFDPRReader.from_pretrained("facebook/dpr-reader-single-nq-base", from_pt=True)
757
+ >>> encoded_inputs = tokenizer(
758
+ ... questions=["What is love ?"],
759
+ ... titles=["Haddaway"],
760
+ ... texts=["'What Is Love' is a song recorded by the artist Haddaway"],
761
+ ... return_tensors="tf",
762
+ ... )
763
+ >>> outputs = model(encoded_inputs)
764
+ >>> start_logits = outputs.start_logits
765
+ >>> end_logits = outputs.end_logits
766
+ >>> relevance_logits = outputs.relevance_logits
767
+ ```
768
+ """
769
+ if input_ids is not None and inputs_embeds is not None:
770
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
771
+ elif input_ids is not None:
772
+ input_shape = shape_list(input_ids)
773
+ elif inputs_embeds is not None:
774
+ input_shape = shape_list(inputs_embeds)[:-1]
775
+ else:
776
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
777
+
778
+ if attention_mask is None:
779
+ attention_mask = tf.ones(input_shape, dtype=tf.dtypes.int32)
780
+
781
+ return self.span_predictor(
782
+ input_ids=input_ids,
783
+ attention_mask=attention_mask,
784
+ inputs_embeds=inputs_embeds,
785
+ output_attentions=output_attentions,
786
+ output_hidden_states=output_hidden_states,
787
+ return_dict=return_dict,
788
+ training=training,
789
+ )
790
+
791
+ def build(self, input_shape=None):
792
+ if self.built:
793
+ return
794
+ self.built = True
795
+ if getattr(self, "span_predictor", None) is not None:
796
+ with tf.name_scope(self.span_predictor.name):
797
+ self.span_predictor.build(None)
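
As the `pooler_output` docstrings above note, the question and context encoders are meant to be used together: embeddings from both are compared by dot product for nearest-neighbor retrieval. A minimal retrieval sketch (checkpoint names as in the docstring examples; `from_pt=True` because the published weights are PyTorch):

```python
import tensorflow as tf
from transformers import (
    TFDPRContextEncoder,
    TFDPRQuestionEncoder,
    DPRContextEncoderTokenizer,
    DPRQuestionEncoderTokenizer,
)

q_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
q_encoder = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base", from_pt=True)
ctx_tokenizer = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
ctx_encoder = TFDPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", from_pt=True)

question = "What is love ?"
passages = [
    "'What Is Love' is a song recorded by the artist Haddaway",
    "Love is a feeling.",
]

# Embed the question and the candidate passages separately.
q_emb = q_encoder(**q_tokenizer(question, return_tensors="tf")).pooler_output        # (1, dim)
ctx_emb = ctx_encoder(**ctx_tokenizer(passages, return_tensors="tf", padding=True)).pooler_output  # (2, dim)

# Dot-product similarity between the question and each passage.
scores = tf.squeeze(tf.matmul(q_emb, ctx_emb, transpose_b=True), axis=0)
print(passages[int(tf.argmax(scores))])
```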
venv/lib/python3.10/site-packages/transformers/models/dpr/tokenization_dpr.py ADDED
@@ -0,0 +1,319 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team, The Hugging Face Team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for DPR."""
16
+
17
+
18
+ import collections
19
+ from typing import List, Optional, Union
20
+
21
+ from ...tokenization_utils_base import BatchEncoding
22
+ from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
23
+ from ..bert.tokenization_bert import BertTokenizer
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
29
+
30
+
31
+ class DPRContextEncoderTokenizer(BertTokenizer):
32
+ r"""
33
+ Construct a DPRContextEncoder tokenizer.
34
+
35
+ [`DPRContextEncoderTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation
36
+ splitting and wordpiece.
37
+
38
+ Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters.
39
+ """
40
+
41
+ vocab_files_names = VOCAB_FILES_NAMES
42
+
43
+
44
+ class DPRQuestionEncoderTokenizer(BertTokenizer):
45
+ r"""
46
+ Constructs a DPRQuestionEncoder tokenizer.
47
+
48
+ [`DPRQuestionEncoderTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation
49
+ splitting and wordpiece.
50
+
51
+ Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters.
52
+ """
53
+
54
+ vocab_files_names = VOCAB_FILES_NAMES
55
+
56
+
57
+ DPRSpanPrediction = collections.namedtuple(
58
+ "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
59
+ )
60
+
61
+ DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
62
+
63
+
64
+ CUSTOM_DPR_READER_DOCSTRING = r"""
65
+ Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
66
+ It converts the strings of a question and different passages (title and text) into sequences of IDs (integers),
67
+ using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
68
+ with the format:
69
+
70
+ ```
71
+ [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
72
+ ```
73
+
74
+ Args:
75
+ questions (`str` or `List[str]`):
76
+ The questions to be encoded. You can specify one question for many passages. In this case, the question
77
+ will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
78
+ `titles` or `texts`.
79
+ titles (`str` or `List[str]`):
80
+ The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
81
+ texts (`str` or `List[str]`):
82
+ The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
83
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
84
+ Activates and controls padding. Accepts the following values:
85
+
86
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
87
+ is provided).
88
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
89
+ acceptable input length for the model if that argument is not provided.
90
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
91
+ lengths).
92
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
93
+ Activates and controls truncation. Accepts the following values:
94
+
95
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
96
+ the maximum acceptable input length for the model if that argument is not provided. This will truncate
97
+ token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
98
+ of pairs) is provided.
99
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
100
+ acceptable input length for the model if that argument is not provided. This will only truncate the first
101
+ sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
102
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
103
+ acceptable input length for the model if that argument is not provided. This will only truncate the
104
+ second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
105
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
106
+ greater than the model maximum admissible input size).
107
+ max_length (`int`, *optional*):
108
+ Controls the maximum length to use by one of the truncation/padding parameters.
109
+
110
+ If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
111
+ is required by one of the truncation/padding parameters. If the model has no specific maximum input
112
+ length (like XLNet) truncation/padding to a maximum length will be deactivated.
113
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
114
+ If set, will return tensors instead of list of python integers. Acceptable values are:
115
+
116
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
117
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
118
+ - `'np'`: Return Numpy `np.ndarray` objects.
119
+ return_attention_mask (`bool`, *optional*):
120
+ Whether or not to return the attention mask. If not set, will return the attention mask according to the
121
+ specific tokenizer's default, defined by the `return_outputs` attribute.
122
+
123
+ [What are attention masks?](../glossary#attention-mask)
124
+
125
+ Returns:
126
+ `Dict[str, List[List[int]]]`: A dictionary with the following keys:
127
+
128
+ - `input_ids`: List of token ids to be fed to a model.
129
+ - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
130
+ """
131
+
132
+
133
+ @add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
134
+ class CustomDPRReaderTokenizerMixin:
135
+ def __call__(
136
+ self,
137
+ questions,
138
+ titles: Optional[str] = None,
139
+ texts: Optional[str] = None,
140
+ padding: Union[bool, str] = False,
141
+ truncation: Union[bool, str] = False,
142
+ max_length: Optional[int] = None,
143
+ return_tensors: Optional[Union[str, TensorType]] = None,
144
+ return_attention_mask: Optional[bool] = None,
145
+ **kwargs,
146
+ ) -> BatchEncoding:
147
+ if titles is None and texts is None:
148
+ return super().__call__(
149
+ questions,
150
+ padding=padding,
151
+ truncation=truncation,
152
+ max_length=max_length,
153
+ return_tensors=return_tensors,
154
+ return_attention_mask=return_attention_mask,
155
+ **kwargs,
156
+ )
157
+ elif titles is None or texts is None:
158
+ text_pair = titles if texts is None else texts
159
+ return super().__call__(
160
+ questions,
161
+ text_pair,
162
+ padding=padding,
163
+ truncation=truncation,
164
+ max_length=max_length,
165
+ return_tensors=return_tensors,
166
+ return_attention_mask=return_attention_mask,
167
+ **kwargs,
168
+ )
169
+ titles = titles if not isinstance(titles, str) else [titles]
170
+ texts = texts if not isinstance(texts, str) else [texts]
171
+ n_passages = len(titles)
172
+ questions = questions if not isinstance(questions, str) else [questions] * n_passages
173
+ if len(titles) != len(texts):
174
+ raise ValueError(
175
+ f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
176
+ )
177
+ encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
178
+ encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
179
+ encoded_inputs = {
180
+ "input_ids": [
181
+ (encoded_question_and_title + encoded_text)[:max_length]
182
+ if max_length is not None and truncation
183
+ else encoded_question_and_title + encoded_text
184
+ for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
185
+ ]
186
+ }
187
+ if return_attention_mask is not False:
188
+ attention_mask = []
189
+ for input_ids in encoded_inputs["input_ids"]:
190
+ attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
191
+ encoded_inputs["attention_mask"] = attention_mask
192
+ return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
193
+
194
+ def decode_best_spans(
195
+ self,
196
+ reader_input: BatchEncoding,
197
+ reader_output: DPRReaderOutput,
198
+ num_spans: int = 16,
199
+ max_answer_length: int = 64,
200
+ num_spans_per_passage: int = 4,
201
+ ) -> List[DPRSpanPrediction]:
202
+ """
203
+ Get the span predictions for the extractive Q&A model.
204
+
205
+ Returns: *List* of *DPRReaderOutput* sorted by descending *(relevance_score, span_score)*. Each
206
+ *DPRReaderOutput* is a *Tuple* with:
207
+
208
+ - **span_score**: `float` that corresponds to the score given by the reader for this span compared to other
209
+ spans in the same passage. It corresponds to the sum of the start and end logits of the span.
210
+ - **relevance_score**: `float` that corresponds to the score of each passage to answer the question,
211
+ compared to all the other passages. It corresponds to the output of the QA classifier of the DPRReader.
212
+ - **doc_id**: `int` the id of the passage.
+ - **start_index**: `int` the start index of the span (inclusive).
213
+ - **end_index**: `int` the end index of the span (inclusive).
214
+
215
+ Examples:
216
+
217
+ ```python
218
+ >>> from transformers import DPRReader, DPRReaderTokenizer
219
+
220
+ >>> tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
221
+ >>> model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
222
+ >>> encoded_inputs = tokenizer(
223
+ ... questions=["What is love ?"],
224
+ ... titles=["Haddaway"],
225
+ ... texts=["'What Is Love' is a song recorded by the artist Haddaway"],
226
+ ... return_tensors="pt",
227
+ ... )
228
+ >>> outputs = model(**encoded_inputs)
229
+ >>> predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
230
+ >>> print(predicted_spans[0].text) # best span
231
+ a song
232
+ ```"""
233
+ input_ids = reader_input["input_ids"]
234
+ start_logits, end_logits, relevance_logits = reader_output[:3]
235
+ n_passages = len(relevance_logits)
236
+ sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
237
+ nbest_spans_predictions: List[DPRReaderOutput] = []
238
+ for doc_id in sorted_docs:
239
+ sequence_ids = list(input_ids[doc_id])
240
+ # assuming question & title information is at the beginning of the sequence
241
+ passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1 # second sep id
242
+ if sequence_ids[-1] == self.pad_token_id:
243
+ sequence_len = sequence_ids.index(self.pad_token_id)
244
+ else:
245
+ sequence_len = len(sequence_ids)
246
+
247
+ best_spans = self._get_best_spans(
248
+ start_logits=start_logits[doc_id][passage_offset:sequence_len],
249
+ end_logits=end_logits[doc_id][passage_offset:sequence_len],
250
+ max_answer_length=max_answer_length,
251
+ top_spans=num_spans_per_passage,
252
+ )
253
+ for start_index, end_index in best_spans:
254
+ start_index += passage_offset
255
+ end_index += passage_offset
256
+ nbest_spans_predictions.append(
257
+ DPRSpanPrediction(
258
+ span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
259
+ relevance_score=relevance_logits[doc_id],
260
+ doc_id=doc_id,
261
+ start_index=start_index,
262
+ end_index=end_index,
263
+ text=self.decode(sequence_ids[start_index : end_index + 1]),
264
+ )
265
+ )
266
+ if len(nbest_spans_predictions) >= num_spans:
267
+ break
268
+ return nbest_spans_predictions[:num_spans]
269
+
270
+ def _get_best_spans(
271
+ self,
272
+ start_logits: List[int],
273
+ end_logits: List[int],
274
+ max_answer_length: int,
275
+ top_spans: int,
276
+ ) -> List[DPRSpanPrediction]:
277
+ """
278
+ Finds the best answer span for the extractive Q&A model for one passage. It returns the best span by descending
279
+ `span_score` order and keeping at most `top_spans` spans. Spans longer than `max_answer_length` are ignored.
280
+ """
281
+ scores = []
282
+ for start_index, start_score in enumerate(start_logits):
283
+ for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
284
+ scores.append(((start_index, start_index + answer_length), start_score + end_score))
285
+ scores = sorted(scores, key=lambda x: x[1], reverse=True)
286
+ chosen_span_intervals = []
287
+ for (start_index, end_index), score in scores:
288
+ if start_index > end_index:
289
+ raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
290
+ length = end_index - start_index + 1
291
+ if length > max_answer_length:
292
+ raise ValueError(f"Span is too long: {length} > {max_answer_length}")
293
+ if any(
294
+ start_index <= prev_start_index <= prev_end_index <= end_index
295
+ or prev_start_index <= start_index <= end_index <= prev_end_index
296
+ for (prev_start_index, prev_end_index) in chosen_span_intervals
297
+ ):
298
+ continue
299
+ chosen_span_intervals.append((start_index, end_index))
300
+
301
+ if len(chosen_span_intervals) == top_spans:
302
+ break
303
+ return chosen_span_intervals
304
+
305
+
306
+ @add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
307
+ class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
308
+ r"""
309
+ Construct a DPRReader tokenizer.
310
+
311
+ [`DPRReaderTokenizer`] is almost identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation
312
+ splitting and wordpiece. The difference is that it has three input strings: question, titles and texts, which are
313
+ combined to be fed to the [`DPRReader`] model.
314
+
315
+ Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters.
316
+ """
317
+
318
+ vocab_files_names = VOCAB_FILES_NAMES
319
+ model_input_names = ["input_ids", "attention_mask"]
venv/lib/python3.10/site-packages/transformers/models/dpr/tokenization_dpr_fast.py ADDED
@@ -0,0 +1,319 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team, The Hugging Face Team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for DPR."""
16
+
17
+
18
+ import collections
19
+ from typing import List, Optional, Union
20
+
21
+ from ...tokenization_utils_base import BatchEncoding
22
+ from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
23
+ from ..bert.tokenization_bert_fast import BertTokenizerFast
24
+ from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
30
+
31
+
32
+ class DPRContextEncoderTokenizerFast(BertTokenizerFast):
33
+ r"""
34
+ Construct a "fast" DPRContextEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
35
+
36
+ [`DPRContextEncoderTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization:
37
+ punctuation splitting and wordpiece.
38
+
39
+ Refer to superclass [`BertTokenizerFast`] for usage examples and documentation concerning parameters.
40
+ """
41
+
42
+ vocab_files_names = VOCAB_FILES_NAMES
43
+ slow_tokenizer_class = DPRContextEncoderTokenizer
44
+
45
+
46
+ class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
47
+ r"""
48
+ Constructs a "fast" DPRQuestionEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
49
+
50
+ [`DPRQuestionEncoderTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization:
51
+ punctuation splitting and wordpiece.
52
+
53
+ Refer to superclass [`BertTokenizerFast`] for usage examples and documentation concerning parameters.
54
+ """
55
+
56
+ vocab_files_names = VOCAB_FILES_NAMES
57
+ slow_tokenizer_class = DPRQuestionEncoderTokenizer
58
+
59
+
60
+ DPRSpanPrediction = collections.namedtuple(
61
+ "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
62
+ )
63
+
64
+ DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
65
+
66
+
67
+ CUSTOM_DPR_READER_DOCSTRING = r"""
68
+ Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
69
+ It converts the strings of a question and different passages (title and text) into sequences of IDs (integers),
70
+ using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
71
+ with the format:
72
+
73
+ [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
74
+
75
+ Args:
76
+ questions (`str` or `List[str]`):
77
+ The questions to be encoded. You can specify one question for many passages. In this case, the question
78
+ will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
79
+ `titles` or `texts`.
80
+ titles (`str` or `List[str]`):
81
+ The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
82
+ texts (`str` or `List[str]`):
83
+ The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
84
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
85
+ Activates and controls padding. Accepts the following values:
86
+
87
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
88
+ is provided).
89
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
90
+ acceptable input length for the model if that argument is not provided.
91
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
92
+ lengths).
93
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
94
+ Activates and controls truncation. Accepts the following values:
95
+
96
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
97
+ the maximum acceptable input length for the model if that argument is not provided. This will truncate
98
+ token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
99
+ of pairs) is provided.
100
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
101
+ acceptable input length for the model if that argument is not provided. This will only truncate the first
102
+ sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
103
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
104
+ acceptable input length for the model if that argument is not provided. This will only truncate the
105
+ second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
106
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
107
+ greater than the model maximum admissible input size).
108
+ max_length (`int`, *optional*):
109
+ Controls the maximum length to use by one of the truncation/padding parameters.
110
+
111
+ If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
112
+ is required by one of the truncation/padding parameters. If the model has no specific maximum input
113
+ length (like XLNet) truncation/padding to a maximum length will be deactivated.
114
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
115
+ If set, will return tensors instead of list of python integers. Acceptable values are:
116
+
117
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
118
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
119
+ - `'np'`: Return Numpy `np.ndarray` objects.
120
+ return_attention_mask (`bool`, *optional*):
121
+ Whether or not to return the attention mask. If not set, will return the attention mask according to the
122
+ specific tokenizer's default, defined by the `return_outputs` attribute.
123
+
124
+ [What are attention masks?](../glossary#attention-mask)
125
+
126
+ Return:
127
+ `Dict[str, List[List[int]]]`: A dictionary with the following keys:
128
+
129
+ - `input_ids`: List of token ids to be fed to a model.
130
+ - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
131
+ """
132
+
133
+
134
+ @add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
135
+ class CustomDPRReaderTokenizerMixin:
136
+ def __call__(
137
+ self,
138
+ questions,
139
+ titles: Optional[str] = None,
140
+ texts: Optional[str] = None,
141
+ padding: Union[bool, str] = False,
142
+ truncation: Union[bool, str] = False,
143
+ max_length: Optional[int] = None,
144
+ return_tensors: Optional[Union[str, TensorType]] = None,
145
+ return_attention_mask: Optional[bool] = None,
146
+ **kwargs,
147
+ ) -> BatchEncoding:
148
+ if titles is None and texts is None:
149
+ return super().__call__(
150
+ questions,
151
+ padding=padding,
152
+ truncation=truncation,
153
+ max_length=max_length,
154
+ return_tensors=return_tensors,
155
+ return_attention_mask=return_attention_mask,
156
+ **kwargs,
157
+ )
158
+ elif titles is None or texts is None:
159
+ text_pair = titles if texts is None else texts
160
+ return super().__call__(
161
+ questions,
162
+ text_pair,
163
+ padding=padding,
164
+ truncation=truncation,
165
+ max_length=max_length,
166
+ return_tensors=return_tensors,
167
+ return_attention_mask=return_attention_mask,
168
+ **kwargs,
169
+ )
170
+ titles = titles if not isinstance(titles, str) else [titles]
171
+ texts = texts if not isinstance(texts, str) else [texts]
172
+ n_passages = len(titles)
173
+ questions = questions if not isinstance(questions, str) else [questions] * n_passages
174
+ assert len(titles) == len(
175
+ texts
176
+ ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
177
+ encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
178
+ encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
179
+ encoded_inputs = {
180
+ "input_ids": [
181
+ (encoded_question_and_title + encoded_text)[:max_length]
182
+ if max_length is not None and truncation
183
+ else encoded_question_and_title + encoded_text
184
+ for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
185
+ ]
186
+ }
187
+ if return_attention_mask is not False:
188
+ attention_mask = []
189
+ for input_ids in encoded_inputs["input_ids"]:
190
+ attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
191
+ encoded_inputs["attention_mask"] = attention_mask
192
+ return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
193
+
194
+ def decode_best_spans(
195
+ self,
196
+ reader_input: BatchEncoding,
197
+ reader_output: DPRReaderOutput,
198
+ num_spans: int = 16,
199
+ max_answer_length: int = 64,
200
+ num_spans_per_passage: int = 4,
201
+ ) -> List[DPRSpanPrediction]:
202
+ """
203
+ Get the span predictions for the extractive Q&A model.
204
+
205
+ Returns: *List* of *DPRSpanPrediction* sorted by descending *(relevance_score, span_score)*. Each
206
+ *DPRSpanPrediction* is a *Tuple* with:
207
+
208
+ - **span_score**: `float` that corresponds to the score given by the reader for this span compared to other
209
+ spans in the same passage. It corresponds to the sum of the start and end logits of the span.
210
+ - **relevance_score**: `float` that corresponds to the score of each passage to answer the question,
211
+ compared to all the other passages. It corresponds to the output of the QA classifier of the DPRReader.
212
+ - **doc_id**: `int` the id of the passage.
+ - **start_index**: `int` the start index of the span (inclusive).
+ - **end_index**: `int` the end index of the span (inclusive).
214
+
215
+ Examples:
216
+
217
+ ```python
218
+ >>> from transformers import DPRReader, DPRReaderTokenizer
219
+
220
+ >>> tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
221
+ >>> model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
222
+ >>> encoded_inputs = tokenizer(
223
+ ... questions=["What is love ?"],
224
+ ... titles=["Haddaway"],
225
+ ... texts=["'What Is Love' is a song recorded by the artist Haddaway"],
226
+ ... return_tensors="pt",
227
+ ... )
228
+ >>> outputs = model(**encoded_inputs)
229
+ >>> predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
230
+ >>> print(predicted_spans[0].text) # best span
231
+ a song
232
+ ```"""
233
+ input_ids = reader_input["input_ids"]
234
+ start_logits, end_logits, relevance_logits = reader_output[:3]
235
+ n_passages = len(relevance_logits)
236
+ sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
237
+ nbest_spans_predictions: List[DPRSpanPrediction] = []
238
+ for doc_id in sorted_docs:
239
+ sequence_ids = list(input_ids[doc_id])
240
+ # assuming question & title information is at the beginning of the sequence
241
+ passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1 # second sep id
242
+ if sequence_ids[-1] == self.pad_token_id:
243
+ sequence_len = sequence_ids.index(self.pad_token_id)
244
+ else:
245
+ sequence_len = len(sequence_ids)
246
+
247
+ best_spans = self._get_best_spans(
248
+ start_logits=start_logits[doc_id][passage_offset:sequence_len],
249
+ end_logits=end_logits[doc_id][passage_offset:sequence_len],
250
+ max_answer_length=max_answer_length,
251
+ top_spans=num_spans_per_passage,
252
+ )
253
+ for start_index, end_index in best_spans:
254
+ start_index += passage_offset
255
+ end_index += passage_offset
256
+ nbest_spans_predictions.append(
257
+ DPRSpanPrediction(
258
+ span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
259
+ relevance_score=relevance_logits[doc_id],
260
+ doc_id=doc_id,
261
+ start_index=start_index,
262
+ end_index=end_index,
263
+ text=self.decode(sequence_ids[start_index : end_index + 1]),
264
+ )
265
+ )
266
+ if len(nbest_spans_predictions) >= num_spans:
267
+ break
268
+ return nbest_spans_predictions[:num_spans]
269
+
270
+ def _get_best_spans(
271
+ self,
272
+ start_logits: List[int],
273
+ end_logits: List[int],
274
+ max_answer_length: int,
275
+ top_spans: int,
276
+ ) -> List[DPRSpanPrediction]:
277
+ """
278
+ Finds the best answer span for the extractive Q&A model for one passage. It returns the best span by descending
279
+ `span_score` order and keeping max `top_spans` spans. Spans longer than `max_answer_length` are ignored.
280
+ """
281
+ scores = []
282
+ for start_index, start_score in enumerate(start_logits):
283
+ for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
284
+ scores.append(((start_index, start_index + answer_length), start_score + end_score))
285
+ scores = sorted(scores, key=lambda x: x[1], reverse=True)
286
+ chosen_span_intervals = []
287
+ for (start_index, end_index), score in scores:
288
+ assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
289
+ length = end_index - start_index + 1
290
+ assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
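+ # Skip any candidate that contains, or is nested inside, a previously selected higher-scoring span.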
291
+ if any(
292
+ start_index <= prev_start_index <= prev_end_index <= end_index
293
+ or prev_start_index <= start_index <= end_index <= prev_end_index
294
+ for (prev_start_index, prev_end_index) in chosen_span_intervals
295
+ ):
296
+ continue
297
+ chosen_span_intervals.append((start_index, end_index))
298
+
299
+ if len(chosen_span_intervals) == top_spans:
300
+ break
301
+ return chosen_span_intervals
302
+
303
+
304
+ @add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
305
+ class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
306
+ r"""
307
+ Constructs a "fast" DPRReader tokenizer (backed by HuggingFace's *tokenizers* library).
308
+
309
+ [`DPRReaderTokenizerFast`] is almost identical to [`BertTokenizerFast`] and runs end-to-end tokenization:
310
+ punctuation splitting and wordpiece. The difference is that it has three input strings: question, titles and texts
311
+ that are combined to be fed to the [`DPRReader`] model.
312
+
313
+ Refer to superclass [`BertTokenizerFast`] for usage examples and documentation concerning parameters.
314
+
315
+ """
316
+
317
+ vocab_files_names = VOCAB_FILES_NAMES
318
+ model_input_names = ["input_ids", "attention_mask"]
319
+ slow_tokenizer_class = DPRReaderTokenizer
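For readers skimming the diff, here is a minimal usage sketch of the reader tokenizer added above (illustrative only; it assumes the `facebook/dpr-reader-single-nq-base` checkpoint referenced in the `decode_best_spans` docstring is reachable). It shows how a single question is duplicated across several passages and encoded as `[CLS] <question> [SEP] <title> [SEP] <text>`:

```python
from transformers import DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")

# One question, two (title, text) passages: the mixin duplicates the question
# as [question] * n_passages before concatenating question/title and text ids.
encoded = tokenizer(
    questions="What is love ?",
    titles=["Haddaway", "What Is Love"],
    texts=[
        "'What Is Love' is a song recorded by the artist Haddaway",
        "What Is Love is a 1993 Eurodance song by the artist Haddaway",
    ],
    padding=True,
    return_tensors="pt",
)
print(encoded["input_ids"].shape)  # (2, sequence_length) -- one row per passage
```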
venv/lib/python3.10/site-packages/transformers/models/efficientnet/__init__.py ADDED
@@ -0,0 +1,84 @@
1
+ # flake8: noqa
2
+ # There's no way to ignore "F401 '...' imported but unused" warnings in this
3
+ # module, but to preserve other warnings. So, don't check this module at all.
4
+
5
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
6
+ #
7
+ # Licensed under the Apache License, Version 2.0 (the "License");
8
+ # you may not use this file except in compliance with the License.
9
+ # You may obtain a copy of the License at
10
+ #
11
+ # http://www.apache.org/licenses/LICENSE-2.0
12
+ #
13
+ # Unless required by applicable law or agreed to in writing, software
14
+ # distributed under the License is distributed on an "AS IS" BASIS,
15
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16
+ # See the License for the specific language governing permissions and
17
+ # limitations under the License.
18
+ from typing import TYPE_CHECKING
19
+
20
+ # rely on isort to merge the imports
21
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
22
+
23
+
24
+ _import_structure = {
25
+ "configuration_efficientnet": [
26
+ "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
27
+ "EfficientNetConfig",
28
+ "EfficientNetOnnxConfig",
29
+ ]
30
+ }
31
+
32
+ try:
33
+ if not is_vision_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
39
+
40
+ try:
41
+ if not is_torch_available():
42
+ raise OptionalDependencyNotAvailable()
43
+ except OptionalDependencyNotAvailable:
44
+ pass
45
+ else:
46
+ _import_structure["modeling_efficientnet"] = [
47
+ "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
48
+ "EfficientNetForImageClassification",
49
+ "EfficientNetModel",
50
+ "EfficientNetPreTrainedModel",
51
+ ]
52
+
53
+ if TYPE_CHECKING:
54
+ from .configuration_efficientnet import (
55
+ EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
56
+ EfficientNetConfig,
57
+ EfficientNetOnnxConfig,
58
+ )
59
+
60
+ try:
61
+ if not is_vision_available():
62
+ raise OptionalDependencyNotAvailable()
63
+ except OptionalDependencyNotAvailable:
64
+ pass
65
+ else:
66
+ from .image_processing_efficientnet import EfficientNetImageProcessor
67
+
68
+ try:
69
+ if not is_torch_available():
70
+ raise OptionalDependencyNotAvailable()
71
+ except OptionalDependencyNotAvailable:
72
+ pass
73
+ else:
74
+ from .modeling_efficientnet import (
75
+ EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
76
+ EfficientNetForImageClassification,
77
+ EfficientNetModel,
78
+ EfficientNetPreTrainedModel,
79
+ )
80
+
81
+ else:
82
+ import sys
83
+
84
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
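A short illustration of what the `_LazyModule` wiring above provides (a sketch, assuming a working `transformers` install): names declared in `_import_structure` are resolved lazily, so importing the package stays cheap and the torch- and vision-dependent submodules are only loaded when their symbols are first accessed.

```python
# Only configuration_efficientnet is imported here; modeling_efficientnet and
# image_processing_efficientnet stay unloaded until their classes are touched.
from transformers.models.efficientnet import EfficientNetConfig

config = EfficientNetConfig()
print(config.model_type)  # "efficientnet"
```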
venv/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.2 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/configuration_efficientnet.cpython-310.pyc ADDED
Binary file (7.31 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/convert_efficientnet_to_pytorch.cpython-310.pyc ADDED
Binary file (9.38 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/image_processing_efficientnet.cpython-310.pyc ADDED
Binary file (15.6 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/modeling_efficientnet.cpython-310.pyc ADDED
Binary file (18.8 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/efficientnet/configuration_efficientnet.py ADDED
@@ -0,0 +1,169 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Google Research, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ EfficientNet model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import List, Mapping
19
+
20
+ from packaging import version
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...onnx import OnnxConfig
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ from ..deprecated._archive_maps import EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
31
+
32
+
33
+ class EfficientNetConfig(PretrainedConfig):
34
+ r"""
35
+ This is the configuration class to store the configuration of an [`EfficientNetModel`]. It is used to instantiate an
36
+ EfficientNet model according to the specified arguments, defining the model architecture. Instantiating a
37
+ configuration with the defaults will yield a similar configuration to that of the EfficientNet
38
+ [google/efficientnet-b7](https://huggingface.co/google/efficientnet-b7) architecture.
39
+
40
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
41
+ documentation from [`PretrainedConfig`] for more information.
42
+
43
+ Args:
44
+ num_channels (`int`, *optional*, defaults to 3):
45
+ The number of input channels.
46
+ image_size (`int`, *optional*, defaults to 600):
47
+ The input image size.
48
+ width_coefficient (`float`, *optional*, defaults to 2.0):
49
+ Scaling coefficient for network width at each stage.
50
+ depth_coefficient (`float`, *optional*, defaults to 3.1):
51
+ Scaling coefficient for network depth at each stage.
52
+ depth_divisor (`int`, *optional*, defaults to 8):
53
+ A unit of network width.
54
+ kernel_sizes (`List[int]`, *optional*, defaults to `[3, 3, 5, 3, 5, 5, 3]`):
55
+ List of kernel sizes to be used in each block.
56
+ in_channels (`List[int]`, *optional*, defaults to `[32, 16, 24, 40, 80, 112, 192]`):
57
+ List of input channel sizes to be used in each block for convolutional layers.
58
+ out_channels (`List[int]`, *optional*, defaults to `[16, 24, 40, 80, 112, 192, 320]`):
59
+ List of output channel sizes to be used in each block for convolutional layers.
60
+ depthwise_padding (`List[int]`, *optional*, defaults to `[]`):
61
+ List of block indices with square padding.
62
+ strides (`List[int]`, *optional*, defaults to `[1, 2, 2, 2, 1, 2, 1]`):
63
+ List of stride sizes to be used in each block for convolutional layers.
64
+ num_block_repeats (`List[int]`, *optional*, defaults to `[1, 2, 2, 3, 3, 4, 1]`):
65
+ List of the number of times each block is to be repeated.
66
+ expand_ratios (`List[int]`, *optional*, defaults to `[1, 6, 6, 6, 6, 6, 6]`):
67
+ List of scaling coefficients for each block.
68
+ squeeze_expansion_ratio (`float`, *optional*, defaults to 0.25):
69
+ Squeeze expansion ratio.
70
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
71
+ The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
72
+ `"selu"`, `"gelu_new"`, `"silu"` and `"mish"` are supported.
73
+ hidden_dim (`int`, *optional*, defaults to 2560):
74
+ The hidden dimension of the layer before the classification head.
75
+ pooling_type (`str` or `function`, *optional*, defaults to `"mean"`):
76
+ Type of final pooling to be applied before the dense classification head. Available options are [`"mean"`,
77
+ `"max"`]
78
+ initializer_range (`float`, *optional*, defaults to 0.02):
79
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
80
+ batch_norm_eps (`float`, *optional*, defaults to 1e-3):
81
+ The epsilon used by the batch normalization layers.
82
+ batch_norm_momentum (`float`, *optional*, defaults to 0.99):
83
+ The momentum used by the batch normalization layers.
84
+ dropout_rate (`float`, *optional*, defaults to 0.5):
85
+ The dropout rate to be applied before final classifier layer.
86
+ drop_connect_rate (`float`, *optional*, defaults to 0.2):
87
+ The drop rate for skip connections.
88
+
89
+ Example:
90
+ ```python
91
+ >>> from transformers import EfficientNetConfig, EfficientNetModel
92
+
93
+ >>> # Initializing an EfficientNet efficientnet-b7 style configuration
94
+ >>> configuration = EfficientNetConfig()
95
+
96
+ >>> # Initializing a model (with random weights) from the efficientnet-b7 style configuration
97
+ >>> model = EfficientNetModel(configuration)
98
+
99
+ >>> # Accessing the model configuration
100
+ >>> configuration = model.config
101
+ ```"""
102
+
103
+ model_type = "efficientnet"
104
+
105
+ def __init__(
106
+ self,
107
+ num_channels: int = 3,
108
+ image_size: int = 600,
109
+ width_coefficient: float = 2.0,
110
+ depth_coefficient: float = 3.1,
111
+ depth_divisor: int = 8,
112
+ kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
113
+ in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
114
+ out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
115
+ depthwise_padding: List[int] = [],
116
+ strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
117
+ num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
118
+ expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
119
+ squeeze_expansion_ratio: float = 0.25,
120
+ hidden_act: str = "swish",
121
+ hidden_dim: int = 2560,
122
+ pooling_type: str = "mean",
123
+ initializer_range: float = 0.02,
124
+ batch_norm_eps: float = 0.001,
125
+ batch_norm_momentum: float = 0.99,
126
+ dropout_rate: float = 0.5,
127
+ drop_connect_rate: float = 0.2,
128
+ **kwargs,
129
+ ):
130
+ super().__init__(**kwargs)
131
+
132
+ self.num_channels = num_channels
133
+ self.image_size = image_size
134
+ self.width_coefficient = width_coefficient
135
+ self.depth_coefficient = depth_coefficient
136
+ self.depth_divisor = depth_divisor
137
+ self.kernel_sizes = kernel_sizes
138
+ self.in_channels = in_channels
139
+ self.out_channels = out_channels
140
+ self.depthwise_padding = depthwise_padding
141
+ self.strides = strides
142
+ self.num_block_repeats = num_block_repeats
143
+ self.expand_ratios = expand_ratios
144
+ self.squeeze_expansion_ratio = squeeze_expansion_ratio
145
+ self.hidden_act = hidden_act
146
+ self.hidden_dim = hidden_dim
147
+ self.pooling_type = pooling_type
148
+ self.initializer_range = initializer_range
149
+ self.batch_norm_eps = batch_norm_eps
150
+ self.batch_norm_momentum = batch_norm_momentum
151
+ self.dropout_rate = dropout_rate
152
+ self.drop_connect_rate = drop_connect_rate
153
+ self.num_hidden_layers = sum(num_block_repeats) * 4
154
+
155
+
156
+ class EfficientNetOnnxConfig(OnnxConfig):
157
+ torch_onnx_minimum_version = version.parse("1.11")
158
+
159
+ @property
160
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
161
+ return OrderedDict(
162
+ [
163
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
164
+ ]
165
+ )
166
+
167
+ @property
168
+ def atol_for_validation(self) -> float:
169
+ return 1e-5
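A small worked example of the derived attribute set at the end of `__init__` above (a sketch using only the defaults shown in this file): `num_hidden_layers` is computed from the per-stage block repeats, so the default b7-style configuration reports `sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64`.

```python
from transformers import EfficientNetConfig

config = EfficientNetConfig()  # b7-style defaults from the file above
assert config.num_hidden_layers == sum(config.num_block_repeats) * 4 == 64

# Illustrative override: these values mirror the b0 entry of CONFIG_MAP in the
# conversion script below; they are an assumption for demonstration, not a
# substitute for the released efficientnet-b0 checkpoint.
b0_like = EfficientNetConfig(
    width_coefficient=1.0,
    depth_coefficient=1.0,
    image_size=224,
    hidden_dim=1280,
    dropout_rate=0.2,
)
```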
venv/lib/python3.10/site-packages/transformers/models/efficientnet/convert_efficientnet_to_pytorch.py ADDED
@@ -0,0 +1,339 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert EfficientNet checkpoints from the original repository.
16
+
17
+ URL: https://github.com/keras-team/keras/blob/v2.11.0/keras/applications/efficientnet.py"""
18
+
19
+ import argparse
20
+ import json
21
+ import os
22
+
23
+ import numpy as np
24
+ import PIL
25
+ import requests
26
+ import tensorflow.keras.applications.efficientnet as efficientnet
27
+ import torch
28
+ from huggingface_hub import hf_hub_download
29
+ from PIL import Image
30
+ from tensorflow.keras.preprocessing import image
31
+
32
+ from transformers import (
33
+ EfficientNetConfig,
34
+ EfficientNetForImageClassification,
35
+ EfficientNetImageProcessor,
36
+ )
37
+ from transformers.utils import logging
38
+
39
+
40
+ logging.set_verbosity_info()
41
+ logger = logging.get_logger(__name__)
42
+
43
+ model_classes = {
44
+ "b0": efficientnet.EfficientNetB0,
45
+ "b1": efficientnet.EfficientNetB1,
46
+ "b2": efficientnet.EfficientNetB2,
47
+ "b3": efficientnet.EfficientNetB3,
48
+ "b4": efficientnet.EfficientNetB4,
49
+ "b5": efficientnet.EfficientNetB5,
50
+ "b6": efficientnet.EfficientNetB6,
51
+ "b7": efficientnet.EfficientNetB7,
52
+ }
53
+
54
+ CONFIG_MAP = {
55
+ "b0": {
56
+ "hidden_dim": 1280,
57
+ "width_coef": 1.0,
58
+ "depth_coef": 1.0,
59
+ "image_size": 224,
60
+ "dropout_rate": 0.2,
61
+ "dw_padding": [],
62
+ },
63
+ "b1": {
64
+ "hidden_dim": 1280,
65
+ "width_coef": 1.0,
66
+ "depth_coef": 1.1,
67
+ "image_size": 240,
68
+ "dropout_rate": 0.2,
69
+ "dw_padding": [16],
70
+ },
71
+ "b2": {
72
+ "hidden_dim": 1408,
73
+ "width_coef": 1.1,
74
+ "depth_coef": 1.2,
75
+ "image_size": 260,
76
+ "dropout_rate": 0.3,
77
+ "dw_padding": [5, 8, 16],
78
+ },
79
+ "b3": {
80
+ "hidden_dim": 1536,
81
+ "width_coef": 1.2,
82
+ "depth_coef": 1.4,
83
+ "image_size": 300,
84
+ "dropout_rate": 0.3,
85
+ "dw_padding": [5, 18],
86
+ },
87
+ "b4": {
88
+ "hidden_dim": 1792,
89
+ "width_coef": 1.4,
90
+ "depth_coef": 1.8,
91
+ "image_size": 380,
92
+ "dropout_rate": 0.4,
93
+ "dw_padding": [6],
94
+ },
95
+ "b5": {
96
+ "hidden_dim": 2048,
97
+ "width_coef": 1.6,
98
+ "depth_coef": 2.2,
99
+ "image_size": 456,
100
+ "dropout_rate": 0.4,
101
+ "dw_padding": [13, 27],
102
+ },
103
+ "b6": {
104
+ "hidden_dim": 2304,
105
+ "width_coef": 1.8,
106
+ "depth_coef": 2.6,
107
+ "image_size": 528,
108
+ "dropout_rate": 0.5,
109
+ "dw_padding": [31],
110
+ },
111
+ "b7": {
112
+ "hidden_dim": 2560,
113
+ "width_coef": 2.0,
114
+ "depth_coef": 3.1,
115
+ "image_size": 600,
116
+ "dropout_rate": 0.5,
117
+ "dw_padding": [18],
118
+ },
119
+ }
120
+
121
+
122
+ def get_efficientnet_config(model_name):
123
+ config = EfficientNetConfig()
124
+ config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
125
+ config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
126
+ config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
127
+ config.image_size = CONFIG_MAP[model_name]["image_size"]
128
+ config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
129
+ config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
130
+
131
+ repo_id = "huggingface/label-files"
132
+ filename = "imagenet-1k-id2label.json"
133
+ config.num_labels = 1000
134
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
135
+ id2label = {int(k): v for k, v in id2label.items()}
136
+
137
+ config.id2label = id2label
138
+ config.label2id = {v: k for k, v in id2label.items()}
139
+ return config
140
+
141
+
142
+ # We will verify our results on an image of cute cats
143
+ def prepare_img():
144
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
145
+ im = Image.open(requests.get(url, stream=True).raw)
146
+ return im
147
+
148
+
149
+ def convert_image_processor(model_name):
150
+ size = CONFIG_MAP[model_name]["image_size"]
151
+ preprocessor = EfficientNetImageProcessor(
152
+ size={"height": size, "width": size},
153
+ image_mean=[0.485, 0.456, 0.406],
154
+ image_std=[0.47853944, 0.4732864, 0.47434163],
155
+ do_center_crop=False,
156
+ )
157
+ return preprocessor
158
+
159
+
160
+ # here we list all keys to be renamed (original name on the left, our name on the right)
161
+ def rename_keys(original_param_names):
162
+ block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
163
+ block_names = sorted(set(block_names))
164
+ num_blocks = len(block_names)
165
+ block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
166
+
167
+ rename_keys = []
168
+ rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
169
+ rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
170
+ rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
171
+ rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
172
+ rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))
173
+
174
+ for b in block_names:
175
+ hf_b = block_name_mapping[b]
176
+ rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
177
+ rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
178
+ rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
179
+ rename_keys.append(
180
+ (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean")
181
+ )
182
+ rename_keys.append(
183
+ (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var")
184
+ )
185
+ rename_keys.append(
186
+ (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight")
187
+ )
188
+ rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
189
+ rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
190
+ rename_keys.append(
191
+ (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean")
192
+ )
193
+ rename_keys.append(
194
+ (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var")
195
+ )
196
+
197
+ rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
198
+ rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
199
+ rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
200
+ rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
201
+ rename_keys.append(
202
+ (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight")
203
+ )
204
+ rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
205
+ rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
206
+ rename_keys.append(
207
+ (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean")
208
+ )
209
+ rename_keys.append(
210
+ (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var")
211
+ )
212
+
213
+ rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
214
+ rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
215
+ rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
216
+ rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
217
+ rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))
218
+
219
+ key_mapping = {}
220
+ for item in rename_keys:
221
+ if item[0] in original_param_names:
222
+ key_mapping[item[0]] = "efficientnet." + item[1]
223
+
224
+ key_mapping["predictions/kernel:0"] = "classifier.weight"
225
+ key_mapping["predictions/bias:0"] = "classifier.bias"
226
+ return key_mapping
227
+
228
+
229
+ def replace_params(hf_params, tf_params, key_mapping):
230
+ for key, value in tf_params.items():
231
+ if "normalization" in key:
232
+ continue
233
+
234
+ hf_key = key_mapping[key]
235
+ if "_conv" in key and "kernel" in key:
236
+ new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
237
+ elif "depthwise_kernel" in key:
238
+ new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
239
+ elif "kernel" in key:
240
+ new_hf_value = torch.from_numpy(np.transpose(value))
241
+ else:
242
+ new_hf_value = torch.from_numpy(value)
243
+
244
+ # Replace HF parameters with original TF model parameters
245
+ assert hf_params[hf_key].shape == new_hf_value.shape
246
+ hf_params[hf_key].copy_(new_hf_value)
247
+
248
+
249
+ @torch.no_grad()
250
+ def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
251
+ """
252
+ Copy/paste/tweak model's weights to our EfficientNet structure.
253
+ """
254
+ # Load original model
255
+ original_model = model_classes[model_name](
256
+ include_top=True,
257
+ weights="imagenet",
258
+ input_tensor=None,
259
+ input_shape=None,
260
+ pooling=None,
261
+ classes=1000,
262
+ classifier_activation="softmax",
263
+ )
264
+
265
+ tf_params = original_model.trainable_variables
266
+ tf_non_train_params = original_model.non_trainable_variables
267
+ tf_params = {param.name: param.numpy() for param in tf_params}
268
+ for param in tf_non_train_params:
269
+ tf_params[param.name] = param.numpy()
270
+ tf_param_names = list(tf_params.keys())
271
+
272
+ # Load HuggingFace model
273
+ config = get_efficientnet_config(model_name)
274
+ hf_model = EfficientNetForImageClassification(config).eval()
275
+ hf_params = hf_model.state_dict()
276
+
277
+ # Create src-to-dst parameter name mapping dictionary
278
+ print("Converting parameters...")
279
+ key_mapping = rename_keys(tf_param_names)
280
+ replace_params(hf_params, tf_params, key_mapping)
281
+
282
+ # Initialize preprocessor and preprocess input image
283
+ preprocessor = convert_image_processor(model_name)
284
+ inputs = preprocessor(images=prepare_img(), return_tensors="pt")
285
+
286
+ # HF model inference
287
+ hf_model.eval()
288
+ with torch.no_grad():
289
+ outputs = hf_model(**inputs)
290
+ hf_logits = outputs.logits.detach().numpy()
291
+
292
+ # Original model inference
293
+ original_model.trainable = False
294
+ image_size = CONFIG_MAP[model_name]["image_size"]
295
+ img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
296
+ x = image.img_to_array(img)
297
+ x = np.expand_dims(x, axis=0)
298
+ original_logits = original_model.predict(x)
299
+
300
+ # Check whether original and HF model outputs match -> np.allclose
301
+ assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
302
+ print("Model outputs match!")
303
+
304
+ if save_model:
305
+ # Create folder to save model
306
+ if not os.path.isdir(pytorch_dump_folder_path):
307
+ os.mkdir(pytorch_dump_folder_path)
308
+ # Save converted model and image processor
309
+ hf_model.save_pretrained(pytorch_dump_folder_path)
310
+ preprocessor.save_pretrained(pytorch_dump_folder_path)
311
+
312
+ if push_to_hub:
313
+ # Push model and image processor to hub
314
+ print(f"Pushing converted {model_name} to the hub...")
315
+ model_name = f"efficientnet-{model_name}"
316
+ preprocessor.push_to_hub(model_name)
317
+ hf_model.push_to_hub(model_name)
318
+
319
+
320
+ if __name__ == "__main__":
321
+ parser = argparse.ArgumentParser()
322
+ # Required parameters
323
+ parser.add_argument(
324
+ "--model_name",
325
+ default="b0",
326
+ type=str,
327
+ help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
328
+ )
329
+ parser.add_argument(
330
+ "--pytorch_dump_folder_path",
331
+ default="hf_model",
332
+ type=str,
333
+ help="Path to the output PyTorch model directory.",
334
+ )
335
+ parser.add_argument("--save_model", action="store_true", help="Save model to local")
336
+ parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
337
+
338
+ args = parser.parse_args()
339
+ convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
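The least obvious step in `replace_params` above is the weight-layout change between Keras and PyTorch. The following standalone sketch (random data, illustrative shapes) applies the same permutations used in the script to a regular conv kernel and to a depthwise kernel:

```python
import numpy as np
import torch

# Keras stores conv kernels as (height, width, in_channels, out_channels);
# torch.nn.Conv2d expects (out_channels, in_channels, height, width).
tf_kernel = np.random.rand(3, 3, 32, 16).astype(np.float32)
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
assert tuple(pt_kernel.shape) == (16, 32, 3, 3)

# Depthwise kernels are stored as (height, width, in_channels, depth_multiplier)
# and become (in_channels, depth_multiplier, height, width), matching the weight
# shape of a grouped convolution with groups == in_channels.
tf_dw_kernel = np.random.rand(3, 3, 32, 1).astype(np.float32)
pt_dw_kernel = torch.from_numpy(tf_dw_kernel).permute(2, 3, 0, 1)
assert tuple(pt_dw_kernel.shape) == (32, 1, 3, 3)
```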
venv/lib/python3.10/site-packages/transformers/models/efficientnet/image_processing_efficientnet.py ADDED
@@ -0,0 +1,387 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for EfficientNet."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import rescale, resize, to_channel_dimension_format
23
+ from ...image_utils import (
24
+ IMAGENET_STANDARD_MEAN,
25
+ IMAGENET_STANDARD_STD,
26
+ ChannelDimension,
27
+ ImageInput,
28
+ PILImageResampling,
29
+ infer_channel_dimension_format,
30
+ is_scaled_image,
31
+ make_list_of_images,
32
+ to_numpy_array,
33
+ valid_images,
34
+ validate_kwargs,
35
+ validate_preprocess_arguments,
36
+ )
37
+ from ...utils import TensorType, is_vision_available, logging
38
+
39
+
40
+ if is_vision_available():
41
+ import PIL
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+
47
+ class EfficientNetImageProcessor(BaseImageProcessor):
48
+ r"""
49
+ Constructs an EfficientNet image processor.
50
+
51
+ Args:
52
+ do_resize (`bool`, *optional*, defaults to `True`):
53
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
54
+ `do_resize` in `preprocess`.
55
+ size (`Dict[str, int]`, *optional*, defaults to `{"height": 346, "width": 346}`):
56
+ Size of the image after `resize`. Can be overridden by `size` in `preprocess`.
57
+ resample (`PILImageResampling` filter, *optional*, defaults to `PILImageResampling.NEAREST`):
58
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
59
+ do_center_crop (`bool`, *optional*, defaults to `False`):
60
+ Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
61
+ is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in `preprocess`.
62
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 289, "width": 289}`):
63
+ Desired output size when applying center-cropping. Can be overridden by `crop_size` in `preprocess`.
64
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
65
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
66
+ `preprocess` method.
67
+ rescale_offset (`bool`, *optional*, defaults to `False`):
68
+ Whether to rescale the image between [-scale_range, scale_range] instead of [0, scale_range]. Can be
69
+ overridden by the `rescale_offset` parameter in the `preprocess` method.
70
+ do_rescale (`bool`, *optional*, defaults to `True`):
71
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
72
+ parameter in the `preprocess` method.
73
+ do_normalize (`bool`, *optional*, defaults to `True`):
74
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
75
+ method.
76
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
77
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
78
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
79
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
80
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
81
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
82
+ include_top (`bool`, *optional*, defaults to `True`):
83
+ Whether to rescale the image again. Should be set to True if the inputs are used for image classification.
84
+ """
85
+
86
+ model_input_names = ["pixel_values"]
87
+
88
+ def __init__(
89
+ self,
90
+ do_resize: bool = True,
91
+ size: Dict[str, int] = None,
92
+ resample: PILImageResampling = PIL.Image.NEAREST,
93
+ do_center_crop: bool = False,
94
+ crop_size: Dict[str, int] = None,
95
+ rescale_factor: Union[int, float] = 1 / 255,
96
+ rescale_offset: bool = False,
97
+ do_rescale: bool = True,
98
+ do_normalize: bool = True,
99
+ image_mean: Optional[Union[float, List[float]]] = None,
100
+ image_std: Optional[Union[float, List[float]]] = None,
101
+ include_top: bool = True,
102
+ **kwargs,
103
+ ) -> None:
104
+ super().__init__(**kwargs)
105
+ size = size if size is not None else {"height": 346, "width": 346}
106
+ size = get_size_dict(size)
107
+ crop_size = crop_size if crop_size is not None else {"height": 289, "width": 289}
108
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
109
+
110
+ self.do_resize = do_resize
111
+ self.size = size
112
+ self.resample = resample
113
+ self.do_center_crop = do_center_crop
114
+ self.crop_size = crop_size
115
+ self.do_rescale = do_rescale
116
+ self.rescale_factor = rescale_factor
117
+ self.rescale_offset = rescale_offset
118
+ self.do_normalize = do_normalize
119
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
120
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
121
+ self.include_top = include_top
122
+ self._valid_processor_keys = [
123
+ "images",
124
+ "do_resize",
125
+ "size",
126
+ "resample",
127
+ "do_center_crop",
128
+ "crop_size",
129
+ "do_rescale",
130
+ "rescale_factor",
131
+ "rescale_offset",
132
+ "do_normalize",
133
+ "image_mean",
134
+ "image_std",
135
+ "include_top",
136
+ "return_tensors",
137
+ "data_format",
138
+ "input_data_format",
139
+ ]
140
+
141
+ # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.NEAREST
142
+ def resize(
143
+ self,
144
+ image: np.ndarray,
145
+ size: Dict[str, int],
146
+ resample: PILImageResampling = PILImageResampling.NEAREST,
147
+ data_format: Optional[Union[str, ChannelDimension]] = None,
148
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
149
+ **kwargs,
150
+ ) -> np.ndarray:
151
+ """
152
+ Resize an image to `(size["height"], size["width"])`.
153
+
154
+ Args:
155
+ image (`np.ndarray`):
156
+ Image to resize.
157
+ size (`Dict[str, int]`):
158
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
159
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.NEAREST`):
160
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.NEAREST`.
161
+ data_format (`ChannelDimension` or `str`, *optional*):
162
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
163
+ image is used. Can be one of:
164
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
165
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
166
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
167
+ input_data_format (`ChannelDimension` or `str`, *optional*):
168
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
169
+ from the input image. Can be one of:
170
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
171
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
172
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
173
+
174
+ Returns:
175
+ `np.ndarray`: The resized image.
176
+ """
177
+ size = get_size_dict(size)
178
+ if "height" not in size or "width" not in size:
179
+ raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
180
+ output_size = (size["height"], size["width"])
181
+ return resize(
182
+ image,
183
+ size=output_size,
184
+ resample=resample,
185
+ data_format=data_format,
186
+ input_data_format=input_data_format,
187
+ **kwargs,
188
+ )
189
+
190
+ def rescale(
191
+ self,
192
+ image: np.ndarray,
193
+ scale: Union[int, float],
194
+ offset: bool = True,
195
+ data_format: Optional[Union[str, ChannelDimension]] = None,
196
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
197
+ **kwargs,
198
+ ):
199
+ """
200
+ Rescale an image by a scale factor.
201
+
202
+ If `offset` is `True`, the image has its values rescaled by `scale` and then offset by 1. If `scale` is
203
+ 1/127.5, the image is rescaled between [-1, 1].
204
+ image = image * scale - 1
205
+
206
+ If `offset` is `False`, and `scale` is 1/255, the image is rescaled between [0, 1].
207
+ image = image * scale
208
+
209
+ Args:
210
+ image (`np.ndarray`):
211
+ Image to rescale.
212
+ scale (`int` or `float`):
213
+ Scale to apply to the image.
214
+ offset (`bool`, *optional*):
215
+ Whether to scale the image in both negative and positive directions.
216
+ data_format (`str` or `ChannelDimension`, *optional*):
217
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
218
+ input_data_format (`ChannelDimension` or `str`, *optional*):
219
+ The channel dimension format of the input image. If not provided, it will be inferred.
220
+ """
221
+ rescaled_image = rescale(
222
+ image, scale=scale, data_format=data_format, input_data_format=input_data_format, **kwargs
223
+ )
224
+
225
+ if offset:
226
+ rescaled_image = rescaled_image - 1
227
+
228
+ return rescaled_image
229
+
230
+ def preprocess(
231
+ self,
232
+ images: ImageInput,
233
+ do_resize: bool = None,
234
+ size: Dict[str, int] = None,
235
+ resample=None,
236
+ do_center_crop: bool = None,
237
+ crop_size: Dict[str, int] = None,
238
+ do_rescale: bool = None,
239
+ rescale_factor: float = None,
240
+ rescale_offset: bool = None,
241
+ do_normalize: bool = None,
242
+ image_mean: Optional[Union[float, List[float]]] = None,
243
+ image_std: Optional[Union[float, List[float]]] = None,
244
+ include_top: bool = None,
245
+ return_tensors: Optional[Union[str, TensorType]] = None,
246
+ data_format: ChannelDimension = ChannelDimension.FIRST,
247
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
248
+ **kwargs,
249
+ ) -> PIL.Image.Image:
250
+ """
251
+ Preprocess an image or batch of images.
252
+
253
+ Args:
254
+ images (`ImageInput`):
255
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
256
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
257
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
258
+ Whether to resize the image.
259
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
260
+ Size of the image after `resize`.
261
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
262
+ PILImageResampling filter to use if resizing the image. Only has an effect if `do_resize` is set to
263
+ `True`.
264
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
265
+ Whether to center crop the image.
266
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
267
+ Size of the image after center crop. If one edge of the image is smaller than `crop_size`, it will be
268
+ padded with zeros and then cropped.
269
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
270
+ Whether to rescale the image values to [0, 1].
271
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
272
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
273
+ rescale_offset (`bool`, *optional*, defaults to `self.rescale_offset`):
274
+ Whether to rescale the image between [-scale_range, scale_range] instead of [0, scale_range].
275
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
276
+ Whether to normalize the image.
277
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
278
+ Image mean.
279
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
280
+ Image standard deviation.
281
+ include_top (`bool`, *optional*, defaults to `self.include_top`):
282
+ Rescales the image again for image classification if set to True.
283
+ return_tensors (`str` or `TensorType`, *optional*):
284
+ The type of tensors to return. Can be one of:
285
+ - `None`: Return a list of `np.ndarray`.
286
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
287
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
288
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
289
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
290
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
291
+ The channel dimension format for the output image. Can be one of:
292
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
293
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
294
+ input_data_format (`ChannelDimension` or `str`, *optional*):
295
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
296
+ from the input image. Can be one of:
297
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
298
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
299
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
300
+ """
301
+ do_resize = do_resize if do_resize is not None else self.do_resize
302
+ resample = resample if resample is not None else self.resample
303
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
304
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
305
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
306
+ rescale_offset = rescale_offset if rescale_offset is not None else self.rescale_offset
307
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
308
+ image_mean = image_mean if image_mean is not None else self.image_mean
309
+ image_std = image_std if image_std is not None else self.image_std
310
+ include_top = include_top if include_top is not None else self.include_top
311
+
312
+ size = size if size is not None else self.size
313
+ size = get_size_dict(size)
314
+ crop_size = crop_size if crop_size is not None else self.crop_size
315
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
316
+
317
+ images = make_list_of_images(images)
318
+
319
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
320
+
321
+ if not valid_images(images):
322
+ raise ValueError(
323
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
324
+ "torch.Tensor, tf.Tensor or jax.ndarray."
325
+ )
326
+ validate_preprocess_arguments(
327
+ do_rescale=do_rescale,
328
+ rescale_factor=rescale_factor,
329
+ do_normalize=do_normalize,
330
+ image_mean=image_mean,
331
+ image_std=image_std,
332
+ do_center_crop=do_center_crop,
333
+ crop_size=crop_size,
334
+ do_resize=do_resize,
335
+ size=size,
336
+ resample=resample,
337
+ )
338
+ # All transformations expect numpy arrays.
339
+ images = [to_numpy_array(image) for image in images]
340
+
341
+ if is_scaled_image(images[0]) and do_rescale:
342
+ logger.warning_once(
343
+ "It looks like you are trying to rescale already rescaled images. If the input"
344
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
345
+ )
346
+
347
+ if input_data_format is None:
348
+ # We assume that all images have the same channel dimension format.
349
+ input_data_format = infer_channel_dimension_format(images[0])
350
+
351
+ if do_resize:
352
+ images = [
353
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
354
+ for image in images
355
+ ]
356
+
357
+ if do_center_crop:
358
+ images = [
359
+ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
360
+ ]
361
+
362
+ if do_rescale:
363
+ images = [
364
+ self.rescale(
365
+ image=image, scale=rescale_factor, offset=rescale_offset, input_data_format=input_data_format
366
+ )
367
+ for image in images
368
+ ]
369
+
370
+ if do_normalize:
371
+ images = [
372
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
373
+ for image in images
374
+ ]
375
+
376
+ if include_top:
377
+ images = [
378
+ self.normalize(image=image, mean=0, std=image_std, input_data_format=input_data_format)
379
+ for image in images
380
+ ]
381
+
382
+ images = [
383
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
384
+ ]
385
+
386
+ data = {"pixel_values": images}
387
+ return BatchFeature(data=data, tensor_type=return_tensors)
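To make the two rescaling modes documented in `rescale` above concrete, here is a numpy-only sketch (the processor applies the offset variant when `rescale_offset=True` together with `rescale_factor=1/127.5`):

```python
import numpy as np

pixels = np.array([0.0, 127.5, 255.0])

# offset=False with scale=1/255: values land in [0, 1]
print(pixels * (1 / 255))        # [0.  0.5 1. ]

# offset=True with scale=1/127.5: values land in [-1, 1]
print(pixels * (1 / 127.5) - 1)  # [-1.  0.  1.]
```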
venv/lib/python3.10/site-packages/transformers/models/efficientnet/modeling_efficientnet.py ADDED
@@ -0,0 +1,648 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Google Research, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch EfficientNet model."""
16
+
17
+
18
+ import math
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import (
28
+ BaseModelOutputWithNoAttention,
29
+ BaseModelOutputWithPoolingAndNoAttention,
30
+ ImageClassifierOutputWithNoAttention,
31
+ )
32
+ from ...modeling_utils import PreTrainedModel
33
+ from ...utils import (
34
+ add_code_sample_docstrings,
35
+ add_start_docstrings,
36
+ add_start_docstrings_to_model_forward,
37
+ logging,
38
+ )
39
+ from .configuration_efficientnet import EfficientNetConfig
40
+
41
+
42
+ logger = logging.get_logger(__name__)
43
+
44
+ # General docstring
45
+ _CONFIG_FOR_DOC = "EfficientNetConfig"
46
+
47
+ # Base docstring
48
+ _CHECKPOINT_FOR_DOC = "google/efficientnet-b7"
49
+ _EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]
50
+
51
+ # Image classification docstring
52
+ _IMAGE_CLASS_CHECKPOINT = "google/efficientnet-b7"
53
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
54
+
55
+
56
+ from ..deprecated._archive_maps import EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
57
+
58
+
59
+ EFFICIENTNET_START_DOCSTRING = r"""
60
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
61
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
62
+ behavior.
63
+
64
+ Parameters:
65
+ config ([`EfficientNetConfig`]): Model configuration class with all the parameters of the model.
66
+ Initializing with a config file does not load the weights associated with the model, only the
67
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
68
+ """
69
+
70
+ EFFICIENTNET_INPUTS_DOCSTRING = r"""
71
+ Args:
72
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
73
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
74
+ [`AutoImageProcessor.__call__`] for details.
75
+
76
+ output_hidden_states (`bool`, *optional*):
77
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
78
+ more detail.
79
+ return_dict (`bool`, *optional*):
80
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
81
+ """
82
+
83
+
84
+ def round_filters(config: EfficientNetConfig, num_channels: int):
85
+ r"""
86
+ Round number of filters based on depth multiplier.
87
+ """
88
+ divisor = config.depth_divisor
89
+ num_channels *= config.width_coefficient
90
+ new_dim = max(divisor, int(num_channels + divisor / 2) // divisor * divisor)
91
+
92
+ # Make sure that round down does not go down by more than 10%.
93
+ if new_dim < 0.9 * num_channels:
94
+ new_dim += divisor
95
+
96
+ return int(new_dim)
97
+
98
+
99
+ def correct_pad(kernel_size: Union[int, Tuple], adjust: bool = True):
100
+ r"""
101
+ Utility function to get the tuple padding value for the depthwise convolution.
102
+
103
+ Args:
104
+ kernel_size (`int` or `tuple`):
105
+ Kernel size of the convolution layers.
106
+ adjust (`bool`, *optional*, defaults to `True`):
107
+ Adjusts padding value to apply to right and bottom sides of the input.
108
+ """
109
+ if isinstance(kernel_size, int):
110
+ kernel_size = (kernel_size, kernel_size)
111
+
112
+ correct = (kernel_size[0] // 2, kernel_size[1] // 2)
113
+ if adjust:
114
+ return (correct[1] - 1, correct[1], correct[0] - 1, correct[0])
115
+ else:
116
+ return (correct[1], correct[1], correct[0], correct[0])
117
+
118
+
119
+ class EfficientNetEmbeddings(nn.Module):
120
+ r"""
121
+ A module that corresponds to the stem module of the original work.
122
+ """
123
+
124
+ def __init__(self, config: EfficientNetConfig):
125
+ super().__init__()
126
+
127
+ self.out_dim = round_filters(config, 32)
128
+ self.padding = nn.ZeroPad2d(padding=(0, 1, 0, 1))
129
+ self.convolution = nn.Conv2d(
130
+ config.num_channels, self.out_dim, kernel_size=3, stride=2, padding="valid", bias=False
131
+ )
132
+ self.batchnorm = nn.BatchNorm2d(self.out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum)
133
+ self.activation = ACT2FN[config.hidden_act]
134
+
135
+ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
136
+ features = self.padding(pixel_values)
137
+ features = self.convolution(features)
138
+ features = self.batchnorm(features)
139
+ features = self.activation(features)
140
+
141
+ return features
142
+
143
+
144
+ class EfficientNetDepthwiseConv2d(nn.Conv2d):
145
+ def __init__(
146
+ self,
147
+ in_channels,
148
+ depth_multiplier=1,
149
+ kernel_size=3,
150
+ stride=1,
151
+ padding=0,
152
+ dilation=1,
153
+ bias=True,
154
+ padding_mode="zeros",
155
+ ):
156
+ out_channels = in_channels * depth_multiplier
157
+ super().__init__(
158
+ in_channels=in_channels,
159
+ out_channels=out_channels,
160
+ kernel_size=kernel_size,
161
+ stride=stride,
162
+ padding=padding,
163
+ dilation=dilation,
164
+ groups=in_channels,
165
+ bias=bias,
166
+ padding_mode=padding_mode,
167
+ )
168
+
169
+
170
+ class EfficientNetExpansionLayer(nn.Module):
171
+ r"""
172
+ This corresponds to the expansion phase of each block in the original implementation.
173
+ """
174
+
175
+ def __init__(self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int):
176
+ super().__init__()
177
+ self.expand_conv = nn.Conv2d(
178
+ in_channels=in_dim,
179
+ out_channels=out_dim,
180
+ kernel_size=1,
181
+ padding="same",
182
+ bias=False,
183
+ )
184
+ self.expand_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps)
185
+ self.expand_act = ACT2FN[config.hidden_act]
186
+
187
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
188
+ # Expand phase
189
+ hidden_states = self.expand_conv(hidden_states)
190
+ hidden_states = self.expand_bn(hidden_states)
191
+ hidden_states = self.expand_act(hidden_states)
192
+
193
+ return hidden_states
194
+
195
+
196
+ class EfficientNetDepthwiseLayer(nn.Module):
197
+ r"""
198
+ This corresponds to the depthwise convolution phase of each block in the original implementation.
199
+ """
200
+
201
+ def __init__(
202
+ self,
203
+ config: EfficientNetConfig,
204
+ in_dim: int,
205
+ stride: int,
206
+ kernel_size: int,
207
+ adjust_padding: bool,
208
+ ):
209
+ super().__init__()
210
+ self.stride = stride
211
+ conv_pad = "valid" if self.stride == 2 else "same"
212
+ padding = correct_pad(kernel_size, adjust=adjust_padding)
213
+
214
+ self.depthwise_conv_pad = nn.ZeroPad2d(padding=padding)
215
+ self.depthwise_conv = EfficientNetDepthwiseConv2d(
216
+ in_dim, kernel_size=kernel_size, stride=stride, padding=conv_pad, bias=False
217
+ )
218
+ self.depthwise_norm = nn.BatchNorm2d(
219
+ num_features=in_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
220
+ )
221
+ self.depthwise_act = ACT2FN[config.hidden_act]
222
+
223
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
224
+ # Depthwise convolution
225
+ if self.stride == 2:
226
+ hidden_states = self.depthwise_conv_pad(hidden_states)
227
+
228
+ hidden_states = self.depthwise_conv(hidden_states)
229
+ hidden_states = self.depthwise_norm(hidden_states)
230
+ hidden_states = self.depthwise_act(hidden_states)
231
+
232
+ return hidden_states
233
+
234
+
235
+ class EfficientNetSqueezeExciteLayer(nn.Module):
236
+ r"""
237
+ This corresponds to the Squeeze-and-Excitation phase of each block in the original implementation.
238
+ """
239
+
240
+ def __init__(self, config: EfficientNetConfig, in_dim: int, expand_dim: int, expand: bool = False):
241
+ super().__init__()
242
+ self.dim = expand_dim if expand else in_dim
243
+ self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio))
244
+
245
+ self.squeeze = nn.AdaptiveAvgPool2d(output_size=1)
246
+ self.reduce = nn.Conv2d(
247
+ in_channels=self.dim,
248
+ out_channels=self.dim_se,
249
+ kernel_size=1,
250
+ padding="same",
251
+ )
252
+ self.expand = nn.Conv2d(
253
+ in_channels=self.dim_se,
254
+ out_channels=self.dim,
255
+ kernel_size=1,
256
+ padding="same",
257
+ )
258
+ self.act_reduce = ACT2FN[config.hidden_act]
259
+ self.act_expand = nn.Sigmoid()
260
+
261
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
262
+ inputs = hidden_states
263
+ hidden_states = self.squeeze(hidden_states)
264
+ hidden_states = self.reduce(hidden_states)
265
+ hidden_states = self.act_reduce(hidden_states)
266
+
267
+ hidden_states = self.expand(hidden_states)
268
+ hidden_states = self.act_expand(hidden_states)
269
+ hidden_states = torch.mul(inputs, hidden_states)
270
+
271
+ return hidden_states
272
+
273
+
274
+ class EfficientNetFinalBlockLayer(nn.Module):
275
+ r"""
276
+ This corresponds to the final phase of each block in the original implementation.
277
+ """
278
+
279
+ def __init__(
280
+ self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int, drop_rate: float, id_skip: bool
281
+ ):
282
+ super().__init__()
283
+ self.apply_dropout = stride == 1 and not id_skip
284
+ self.project_conv = nn.Conv2d(
285
+ in_channels=in_dim,
286
+ out_channels=out_dim,
287
+ kernel_size=1,
288
+ padding="same",
289
+ bias=False,
290
+ )
291
+ self.project_bn = nn.BatchNorm2d(
292
+ num_features=out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
293
+ )
294
+ self.dropout = nn.Dropout(p=drop_rate)
295
+
296
+ def forward(self, embeddings: torch.FloatTensor, hidden_states: torch.FloatTensor) -> torch.Tensor:
297
+ hidden_states = self.project_conv(hidden_states)
298
+ hidden_states = self.project_bn(hidden_states)
299
+
300
+ if self.apply_dropout:
301
+ hidden_states = self.dropout(hidden_states)
302
+ hidden_states = hidden_states + embeddings
303
+
304
+ return hidden_states
305
+
306
+
307
+ class EfficientNetBlock(nn.Module):
308
+ r"""
309
+ This corresponds to the expansion and depthwise convolution phase of each block in the original implementation.
310
+
311
+ Args:
312
+ config ([`EfficientNetConfig`]):
313
+ Model configuration class.
314
+ in_dim (`int`):
315
+ Number of input channels.
316
+ out_dim (`int`):
317
+ Number of output channels.
318
+ stride (`int`):
319
+ Stride size to be used in convolution layers.
320
+ expand_ratio (`int`):
321
+ Expand ratio to set the output dimensions for the expansion and squeeze-excite layers.
322
+ kernel_size (`int`):
323
+ Kernel size for the depthwise convolution layer.
324
+ drop_rate (`float`):
325
+ Dropout rate to be used in the final phase of each block.
326
+ id_skip (`bool`):
327
+ Whether to apply dropout and sum the final hidden states with the input embeddings during the final phase
328
+ of each block. Set to `True` for the first block of each stage.
329
+ adjust_padding (`bool`):
330
+ Whether to apply padding only to the right and bottom sides of the input kernel before the depthwise convolution
331
+ operation. Set to `True` for inputs with odd input sizes.
332
+ """
333
+
334
+ def __init__(
335
+ self,
336
+ config: EfficientNetConfig,
337
+ in_dim: int,
338
+ out_dim: int,
339
+ stride: int,
340
+ expand_ratio: int,
341
+ kernel_size: int,
342
+ drop_rate: float,
343
+ id_skip: bool,
344
+ adjust_padding: bool,
345
+ ):
346
+ super().__init__()
347
+ self.expand_ratio = expand_ratio
348
+ self.expand = True if self.expand_ratio != 1 else False
349
+ expand_in_dim = in_dim * expand_ratio
350
+
351
+ if self.expand:
352
+ self.expansion = EfficientNetExpansionLayer(
353
+ config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride
354
+ )
355
+
356
+ self.depthwise_conv = EfficientNetDepthwiseLayer(
357
+ config=config,
358
+ in_dim=expand_in_dim if self.expand else in_dim,
359
+ stride=stride,
360
+ kernel_size=kernel_size,
361
+ adjust_padding=adjust_padding,
362
+ )
363
+ self.squeeze_excite = EfficientNetSqueezeExciteLayer(
364
+ config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand
365
+ )
366
+ self.projection = EfficientNetFinalBlockLayer(
367
+ config=config,
368
+ in_dim=expand_in_dim if self.expand else in_dim,
369
+ out_dim=out_dim,
370
+ stride=stride,
371
+ drop_rate=drop_rate,
372
+ id_skip=id_skip,
373
+ )
374
+
375
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
376
+ embeddings = hidden_states
377
+ # Expansion and depthwise convolution phase
378
+ if self.expand_ratio != 1:
379
+ hidden_states = self.expansion(hidden_states)
380
+ hidden_states = self.depthwise_conv(hidden_states)
381
+
382
+ # Squeeze and excite phase
383
+ hidden_states = self.squeeze_excite(hidden_states)
384
+ hidden_states = self.projection(embeddings, hidden_states)
385
+ return hidden_states
386
+
387
+
388
+ class EfficientNetEncoder(nn.Module):
389
+ r"""
390
+ Forward propagates the embeddings through each EfficientNet block.
391
+
392
+ Args:
393
+ config ([`EfficientNetConfig`]):
394
+ Model configuration class.
395
+ """
396
+
397
+ def __init__(self, config: EfficientNetConfig):
398
+ super().__init__()
399
+ self.config = config
400
+ self.depth_coefficient = config.depth_coefficient
401
+
402
+ def round_repeats(repeats):
403
+ # Round number of block repeats based on depth multiplier.
404
+ return int(math.ceil(self.depth_coefficient * repeats))
405
+
406
+ num_base_blocks = len(config.in_channels)
407
+ num_blocks = sum(round_repeats(n) for n in config.num_block_repeats)
408
+
409
+ curr_block_num = 0
410
+ blocks = []
411
+ for i in range(num_base_blocks):
412
+ in_dim = round_filters(config, config.in_channels[i])
413
+ out_dim = round_filters(config, config.out_channels[i])
414
+ stride = config.strides[i]
415
+ kernel_size = config.kernel_sizes[i]
416
+ expand_ratio = config.expand_ratios[i]
417
+
418
+ for j in range(round_repeats(config.num_block_repeats[i])):
419
+ id_skip = True if j == 0 else False
420
+ stride = 1 if j > 0 else stride
421
+ in_dim = out_dim if j > 0 else in_dim
422
+ adjust_padding = False if curr_block_num in config.depthwise_padding else True
423
+ drop_rate = config.drop_connect_rate * curr_block_num / num_blocks
424
+
425
+ block = EfficientNetBlock(
426
+ config=config,
427
+ in_dim=in_dim,
428
+ out_dim=out_dim,
429
+ stride=stride,
430
+ kernel_size=kernel_size,
431
+ expand_ratio=expand_ratio,
432
+ drop_rate=drop_rate,
433
+ id_skip=id_skip,
434
+ adjust_padding=adjust_padding,
435
+ )
436
+ blocks.append(block)
437
+ curr_block_num += 1
438
+
439
+ self.blocks = nn.ModuleList(blocks)
440
+ self.top_conv = nn.Conv2d(
441
+ in_channels=out_dim,
442
+ out_channels=round_filters(config, 1280),
443
+ kernel_size=1,
444
+ padding="same",
445
+ bias=False,
446
+ )
447
+ self.top_bn = nn.BatchNorm2d(
448
+ num_features=config.hidden_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
449
+ )
450
+ self.top_activation = ACT2FN[config.hidden_act]
451
+
452
+ def forward(
453
+ self,
454
+ hidden_states: torch.FloatTensor,
455
+ output_hidden_states: Optional[bool] = False,
456
+ return_dict: Optional[bool] = True,
457
+ ) -> BaseModelOutputWithNoAttention:
458
+ all_hidden_states = (hidden_states,) if output_hidden_states else None
459
+
460
+ for block in self.blocks:
461
+ hidden_states = block(hidden_states)
462
+ if output_hidden_states:
463
+ all_hidden_states += (hidden_states,)
464
+
465
+ hidden_states = self.top_conv(hidden_states)
466
+ hidden_states = self.top_bn(hidden_states)
467
+ hidden_states = self.top_activation(hidden_states)
468
+
469
+ if not return_dict:
470
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
471
+
472
+ return BaseModelOutputWithNoAttention(
473
+ last_hidden_state=hidden_states,
474
+ hidden_states=all_hidden_states,
475
+ )
476
+
477
+
478
+ class EfficientNetPreTrainedModel(PreTrainedModel):
479
+ """
480
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
481
+ models.
482
+ """
483
+
484
+ config_class = EfficientNetConfig
485
+ base_model_prefix = "efficientnet"
486
+ main_input_name = "pixel_values"
487
+ _no_split_modules = []
488
+
489
+ def _init_weights(self, module):
490
+ """Initialize the weights"""
491
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
492
+ # Slightly different from the TF version which uses truncated_normal for initialization
493
+ # cf https://github.com/pytorch/pytorch/pull/5617
494
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
495
+ if module.bias is not None:
496
+ module.bias.data.zero_()
497
+ elif isinstance(module, nn.LayerNorm):
498
+ module.bias.data.zero_()
499
+ module.weight.data.fill_(1.0)
500
+
501
+
502
+ @add_start_docstrings(
503
+ "The bare EfficientNet model outputting raw features without any specific head on top.",
504
+ EFFICIENTNET_START_DOCSTRING,
505
+ )
506
+ class EfficientNetModel(EfficientNetPreTrainedModel):
507
+ def __init__(self, config: EfficientNetConfig):
508
+ super().__init__(config)
509
+ self.config = config
510
+ self.embeddings = EfficientNetEmbeddings(config)
511
+ self.encoder = EfficientNetEncoder(config)
512
+
513
+ # Final pooling layer
514
+ if config.pooling_type == "mean":
515
+ self.pooler = nn.AvgPool2d(config.hidden_dim, ceil_mode=True)
516
+ elif config.pooling_type == "max":
517
+ self.pooler = nn.MaxPool2d(config.hidden_dim, ceil_mode=True)
518
+ else:
519
+ raise ValueError(f"config.pooling_type must be one of ['mean', 'max'], got {config.pooling_type}")
520
+
521
+ # Initialize weights and apply final processing
522
+ self.post_init()
523
+
524
+ @add_start_docstrings_to_model_forward(EFFICIENTNET_INPUTS_DOCSTRING)
525
+ @add_code_sample_docstrings(
526
+ checkpoint=_CHECKPOINT_FOR_DOC,
527
+ output_type=BaseModelOutputWithPoolingAndNoAttention,
528
+ config_class=_CONFIG_FOR_DOC,
529
+ modality="vision",
530
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
531
+ )
532
+ def forward(
533
+ self,
534
+ pixel_values: torch.FloatTensor = None,
535
+ output_hidden_states: Optional[bool] = None,
536
+ return_dict: Optional[bool] = None,
537
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
538
+ output_hidden_states = (
539
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
540
+ )
541
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
542
+
543
+ if pixel_values is None:
544
+ raise ValueError("You have to specify pixel_values")
545
+
546
+ embedding_output = self.embeddings(pixel_values)
547
+
548
+ encoder_outputs = self.encoder(
549
+ embedding_output,
550
+ output_hidden_states=output_hidden_states,
551
+ return_dict=return_dict,
552
+ )
553
+ # Apply pooling
554
+ last_hidden_state = encoder_outputs[0]
555
+ pooled_output = self.pooler(last_hidden_state)
556
+ # Reshape (batch_size, 1280, 1 , 1) -> (batch_size, 1280)
557
+ pooled_output = pooled_output.reshape(pooled_output.shape[:2])
558
+
559
+ if not return_dict:
560
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
561
+
562
+ return BaseModelOutputWithPoolingAndNoAttention(
563
+ last_hidden_state=last_hidden_state,
564
+ pooler_output=pooled_output,
565
+ hidden_states=encoder_outputs.hidden_states,
566
+ )
567
+
568
+
569
+ @add_start_docstrings(
570
+ """
571
+ EfficientNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g.
572
+ for ImageNet.
573
+ """,
574
+ EFFICIENTNET_START_DOCSTRING,
575
+ )
576
+ class EfficientNetForImageClassification(EfficientNetPreTrainedModel):
577
+ def __init__(self, config):
578
+ super().__init__(config)
579
+ self.num_labels = config.num_labels
580
+ self.config = config
581
+ self.efficientnet = EfficientNetModel(config)
582
+ # Classifier head
583
+ self.dropout = nn.Dropout(p=config.dropout_rate)
584
+ self.classifier = nn.Linear(config.hidden_dim, self.num_labels) if self.num_labels > 0 else nn.Identity()
585
+
586
+ # Initialize weights and apply final processing
587
+ self.post_init()
588
+
589
+ @add_start_docstrings_to_model_forward(EFFICIENTNET_INPUTS_DOCSTRING)
590
+ @add_code_sample_docstrings(
591
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
592
+ output_type=ImageClassifierOutputWithNoAttention,
593
+ config_class=_CONFIG_FOR_DOC,
594
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
595
+ )
596
+ def forward(
597
+ self,
598
+ pixel_values: torch.FloatTensor = None,
599
+ labels: Optional[torch.LongTensor] = None,
600
+ output_hidden_states: Optional[bool] = None,
601
+ return_dict: Optional[bool] = None,
602
+ ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
603
+ r"""
604
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
605
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
606
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
607
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
608
+ """
609
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
610
+
611
+ outputs = self.efficientnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
612
+
613
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
614
+ pooled_output = self.dropout(pooled_output)
615
+ logits = self.classifier(pooled_output)
616
+
617
+ loss = None
618
+ if labels is not None:
619
+ if self.config.problem_type is None:
620
+ if self.num_labels == 1:
621
+ self.config.problem_type = "regression"
622
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
623
+ self.config.problem_type = "single_label_classification"
624
+ else:
625
+ self.config.problem_type = "multi_label_classification"
626
+
627
+ if self.config.problem_type == "regression":
628
+ loss_fct = MSELoss()
629
+ if self.num_labels == 1:
630
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
631
+ else:
632
+ loss = loss_fct(logits, labels)
633
+ elif self.config.problem_type == "single_label_classification":
634
+ loss_fct = CrossEntropyLoss()
635
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
636
+ elif self.config.problem_type == "multi_label_classification":
637
+ loss_fct = BCEWithLogitsLoss()
638
+ loss = loss_fct(logits, labels)
639
+
640
+ if not return_dict:
641
+ output = (logits,) + outputs[2:]
642
+ return ((loss,) + output) if loss is not None else output
643
+
644
+ return ImageClassifierOutputWithNoAttention(
645
+ loss=loss,
646
+ logits=logits,
647
+ hidden_states=outputs.hidden_states,
648
+ )
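The classes added above are exercised through the standard `transformers` auto-API. A minimal usage sketch (not part of this diff), assuming the `google/efficientnet-b7` checkpoint referenced by the docstring constants and a hypothetical local image file, could look like:

# Hedged sketch: classify one image with the checkpoint named in _IMAGE_CLASS_CHECKPOINT.
# "cat.png" is a placeholder path, not a file shipped with this commit.
import torch
from PIL import Image
from transformers import AutoImageProcessor, EfficientNetForImageClassification

processor = AutoImageProcessor.from_pretrained("google/efficientnet-b7")
model = EfficientNetForImageClassification.from_pretrained("google/efficientnet-b7")

inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits          # shape (1, num_labels)
print(model.config.id2label[logits.argmax(-1).item()])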
venv/lib/python3.10/site-packages/transformers/models/flaubert/__init__.py ADDED
@@ -0,0 +1,103 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_flaubert": ["FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FlaubertConfig", "FlaubertOnnxConfig"],
22
+ "tokenization_flaubert": ["FlaubertTokenizer"],
23
+ }
24
+
25
+ try:
26
+ if not is_torch_available():
27
+ raise OptionalDependencyNotAvailable()
28
+ except OptionalDependencyNotAvailable:
29
+ pass
30
+ else:
31
+ _import_structure["modeling_flaubert"] = [
32
+ "FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
33
+ "FlaubertForMultipleChoice",
34
+ "FlaubertForQuestionAnswering",
35
+ "FlaubertForQuestionAnsweringSimple",
36
+ "FlaubertForSequenceClassification",
37
+ "FlaubertForTokenClassification",
38
+ "FlaubertModel",
39
+ "FlaubertWithLMHeadModel",
40
+ "FlaubertPreTrainedModel",
41
+ ]
42
+
43
+ try:
44
+ if not is_tf_available():
45
+ raise OptionalDependencyNotAvailable()
46
+ except OptionalDependencyNotAvailable:
47
+ pass
48
+ else:
49
+ _import_structure["modeling_tf_flaubert"] = [
50
+ "TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
51
+ "TFFlaubertForMultipleChoice",
52
+ "TFFlaubertForQuestionAnsweringSimple",
53
+ "TFFlaubertForSequenceClassification",
54
+ "TFFlaubertForTokenClassification",
55
+ "TFFlaubertModel",
56
+ "TFFlaubertPreTrainedModel",
57
+ "TFFlaubertWithLMHeadModel",
58
+ ]
59
+
60
+
61
+ if TYPE_CHECKING:
62
+ from .configuration_flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig, FlaubertOnnxConfig
63
+ from .tokenization_flaubert import FlaubertTokenizer
64
+
65
+ try:
66
+ if not is_torch_available():
67
+ raise OptionalDependencyNotAvailable()
68
+ except OptionalDependencyNotAvailable:
69
+ pass
70
+ else:
71
+ from .modeling_flaubert import (
72
+ FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
73
+ FlaubertForMultipleChoice,
74
+ FlaubertForQuestionAnswering,
75
+ FlaubertForQuestionAnsweringSimple,
76
+ FlaubertForSequenceClassification,
77
+ FlaubertForTokenClassification,
78
+ FlaubertModel,
79
+ FlaubertPreTrainedModel,
80
+ FlaubertWithLMHeadModel,
81
+ )
82
+
83
+ try:
84
+ if not is_tf_available():
85
+ raise OptionalDependencyNotAvailable()
86
+ except OptionalDependencyNotAvailable:
87
+ pass
88
+ else:
89
+ from .modeling_tf_flaubert import (
90
+ TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
91
+ TFFlaubertForMultipleChoice,
92
+ TFFlaubertForQuestionAnsweringSimple,
93
+ TFFlaubertForSequenceClassification,
94
+ TFFlaubertForTokenClassification,
95
+ TFFlaubertModel,
96
+ TFFlaubertPreTrainedModel,
97
+ TFFlaubertWithLMHeadModel,
98
+ )
99
+
100
+ else:
101
+ import sys
102
+
103
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
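The module above only declares `_import_structure`; `_LazyModule` defers the heavy framework imports until an attribute is first accessed. A small sketch of the resulting behaviour (assuming PyTorch is installed; the tiny configuration values are illustrative only, not defaults):

# Hedged sketch: attribute access on the lazy module triggers the real imports.
from transformers.models import flaubert

config = flaubert.FlaubertConfig(vocab_size=100, emb_dim=32, n_layers=2, n_heads=2)
model = flaubert.FlaubertModel(config)   # modeling_flaubert is imported here, not at package-import time
print(type(model).__name__)              # FlaubertModel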
venv/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.79 kB).
 
venv/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/configuration_flaubert.cpython-310.pyc ADDED
Binary file (10.1 kB).
 
venv/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/modeling_flaubert.cpython-310.pyc ADDED
Binary file (38.1 kB).
 
venv/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/modeling_tf_flaubert.cpython-310.pyc ADDED
Binary file (38.5 kB).
 
venv/lib/python3.10/site-packages/transformers/models/flaubert/__pycache__/tokenization_flaubert.cpython-310.pyc ADDED
Binary file (18.3 kB).
 
venv/lib/python3.10/site-packages/transformers/models/flaubert/configuration_flaubert.py ADDED
@@ -0,0 +1,234 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present CNRS, Facebook Inc. and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Flaubert configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...onnx import OnnxConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class FlaubertConfig(PretrainedConfig):
31
+ """
32
+ This is the configuration class to store the configuration of a [`FlaubertModel`] or a [`TFFlaubertModel`]. It is
33
+ used to instantiate a FlauBERT model according to the specified arguments, defining the model architecture.
34
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the FlauBERT
35
+ [flaubert/flaubert_base_uncased](https://huggingface.co/flaubert/flaubert_base_uncased) architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+ Args:
41
+ pre_norm (`bool`, *optional*, defaults to `False`):
42
+ Whether to apply the layer normalization before or after the feed forward layer following the attention in
43
+ each layer (Vaswani et al., Tensor2Tensor for Neural Machine Translation. 2018)
44
+ layerdrop (`float`, *optional*, defaults to 0.0):
45
+ Probability to drop layers during training (Fan et al., Reducing Transformer Depth on Demand with
46
+ Structured Dropout. ICLR 2020)
47
+ vocab_size (`int`, *optional*, defaults to 30145):
48
+ Vocabulary size of the FlauBERT model. Defines the number of different tokens that can be represented by
49
+ the `inputs_ids` passed when calling [`FlaubertModel`] or [`TFFlaubertModel`].
50
+ emb_dim (`int`, *optional*, defaults to 2048):
51
+ Dimensionality of the encoder layers and the pooler layer.
52
+ n_layers (`int`, *optional*, defaults to 12):
53
+ Number of hidden layers in the Transformer encoder.
54
+ n_heads (`int`, *optional*, defaults to 16):
55
+ Number of attention heads for each attention layer in the Transformer encoder.
56
+ dropout (`float`, *optional*, defaults to 0.1):
57
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
58
+ attention_dropout (`float`, *optional*, defaults to 0.1):
59
+ The dropout probability for the attention mechanism
60
+ gelu_activation (`bool`, *optional*, defaults to `True`):
61
+ Whether or not to use a *gelu* activation instead of *relu*.
62
+ sinusoidal_embeddings (`bool`, *optional*, defaults to `False`):
63
+ Whether or not to use sinusoidal positional embeddings instead of absolute positional embeddings.
64
+ causal (`bool`, *optional*, defaults to `False`):
65
+ Whether or not the model should behave in a causal manner. Causal models use a triangular attention mask in
66
+ order to only attend to the left-side context instead of a bidirectional context.
67
+ asm (`bool`, *optional*, defaults to `False`):
68
+ Whether or not to use an adaptive log softmax projection layer instead of a linear layer for the prediction
69
+ layer.
70
+ n_langs (`int`, *optional*, defaults to 1):
71
+ The number of languages the model handles. Set to 1 for monolingual models.
72
+ use_lang_emb (`bool`, *optional*, defaults to `True`):
73
+ Whether to use language embeddings. Some models use additional language embeddings, see [the multilingual
74
+ models page](http://huggingface.co/transformers/multilingual.html#xlm-language-embeddings) for information
75
+ on how to use them.
76
+ max_position_embeddings (`int`, *optional*, defaults to 512):
77
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
78
+ just in case (e.g., 512 or 1024 or 2048).
79
+ embed_init_std (`float`, *optional*, defaults to 2048^-0.5):
80
+ The standard deviation of the truncated_normal_initializer for initializing the embedding matrices.
81
+ init_std (`float`, *optional*, defaults to 0.02):
82
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices except the
83
+ embedding matrices.
84
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
85
+ The epsilon used by the layer normalization layers.
86
+ bos_index (`int`, *optional*, defaults to 0):
87
+ The index of the beginning of sentence token in the vocabulary.
88
+ eos_index (`int`, *optional*, defaults to 1):
89
+ The index of the end of sentence token in the vocabulary.
90
+ pad_index (`int`, *optional*, defaults to 2):
91
+ The index of the padding token in the vocabulary.
92
+ unk_index (`int`, *optional*, defaults to 3):
93
+ The index of the unknown token in the vocabulary.
94
+ mask_index (`int`, *optional*, defaults to 5):
95
+ The index of the masking token in the vocabulary.
96
+ is_encoder(`bool`, *optional*, defaults to `True`):
97
+ Whether or not the initialized model should be a transformer encoder or decoder as seen in Vaswani et al.
98
+ summary_type (`string`, *optional*, defaults to "first"):
99
+ Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
100
+
101
+ Has to be one of the following options:
102
+
103
+ - `"last"`: Take the last token hidden state (like XLNet).
104
+ - `"first"`: Take the first token hidden state (like BERT).
105
+ - `"mean"`: Take the mean of all tokens hidden states.
106
+ - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
107
+ - `"attn"`: Not implemented now, use multi-head attention.
108
+ summary_use_proj (`bool`, *optional*, defaults to `True`):
109
+ Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
110
+
111
+ Whether or not to add a projection after the vector extraction.
112
+ summary_activation (`str`, *optional*):
113
+ Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
114
+
115
+ Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
116
+ summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
117
+ Used in the sequence classification and multiple choice models.
118
+
119
+ Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
120
+ summary_first_dropout (`float`, *optional*, defaults to 0.1):
121
+ Used in the sequence classification and multiple choice models.
122
+
123
+ The dropout ratio to be used after the projection and activation.
124
+ start_n_top (`int`, *optional*, defaults to 5):
125
+ Used in the SQuAD evaluation script.
126
+ end_n_top (`int`, *optional*, defaults to 5):
127
+ Used in the SQuAD evaluation script.
128
+ mask_token_id (`int`, *optional*, defaults to 0):
129
+ Model agnostic parameter to identify masked tokens when generating text in an MLM context.
130
+ lang_id (`int`, *optional*, defaults to 1):
131
+ The ID of the language used by the model. This parameter is used when generating text in a given language.
132
+ """
133
+
134
+ model_type = "flaubert"
135
+ attribute_map = {
136
+ "hidden_size": "emb_dim",
137
+ "num_attention_heads": "n_heads",
138
+ "num_hidden_layers": "n_layers",
139
+ "n_words": "vocab_size", # For backward compatibility
140
+ }
141
+
142
+ def __init__(
143
+ self,
144
+ pre_norm=False,
145
+ layerdrop=0.0,
146
+ vocab_size=30145,
147
+ emb_dim=2048,
148
+ n_layers=12,
149
+ n_heads=16,
150
+ dropout=0.1,
151
+ attention_dropout=0.1,
152
+ gelu_activation=True,
153
+ sinusoidal_embeddings=False,
154
+ causal=False,
155
+ asm=False,
156
+ n_langs=1,
157
+ use_lang_emb=True,
158
+ max_position_embeddings=512,
159
+ embed_init_std=2048**-0.5,
160
+ layer_norm_eps=1e-12,
161
+ init_std=0.02,
162
+ bos_index=0,
163
+ eos_index=1,
164
+ pad_index=2,
165
+ unk_index=3,
166
+ mask_index=5,
167
+ is_encoder=True,
168
+ summary_type="first",
169
+ summary_use_proj=True,
170
+ summary_activation=None,
171
+ summary_proj_to_labels=True,
172
+ summary_first_dropout=0.1,
173
+ start_n_top=5,
174
+ end_n_top=5,
175
+ mask_token_id=0,
176
+ lang_id=0,
177
+ pad_token_id=2,
178
+ bos_token_id=0,
179
+ **kwargs,
180
+ ):
181
+ """Constructs FlaubertConfig."""
182
+ self.pre_norm = pre_norm
183
+ self.layerdrop = layerdrop
184
+ self.vocab_size = vocab_size
185
+ self.emb_dim = emb_dim
186
+ self.n_layers = n_layers
187
+ self.n_heads = n_heads
188
+ self.dropout = dropout
189
+ self.attention_dropout = attention_dropout
190
+ self.gelu_activation = gelu_activation
191
+ self.sinusoidal_embeddings = sinusoidal_embeddings
192
+ self.causal = causal
193
+ self.asm = asm
194
+ self.n_langs = n_langs
195
+ self.use_lang_emb = use_lang_emb
196
+ self.layer_norm_eps = layer_norm_eps
197
+ self.bos_index = bos_index
198
+ self.eos_index = eos_index
199
+ self.pad_index = pad_index
200
+ self.unk_index = unk_index
201
+ self.mask_index = mask_index
202
+ self.is_encoder = is_encoder
203
+ self.max_position_embeddings = max_position_embeddings
204
+ self.embed_init_std = embed_init_std
205
+ self.init_std = init_std
206
+ self.summary_type = summary_type
207
+ self.summary_use_proj = summary_use_proj
208
+ self.summary_activation = summary_activation
209
+ self.summary_proj_to_labels = summary_proj_to_labels
210
+ self.summary_first_dropout = summary_first_dropout
211
+ self.start_n_top = start_n_top
212
+ self.end_n_top = end_n_top
213
+ self.mask_token_id = mask_token_id
214
+ self.lang_id = lang_id
215
+
216
+ if "n_words" in kwargs:
217
+ self.n_words = kwargs["n_words"]
218
+
219
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
220
+
221
+
222
+ class FlaubertOnnxConfig(OnnxConfig):
223
+ @property
224
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
225
+ if self.task == "multiple-choice":
226
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
227
+ else:
228
+ dynamic_axis = {0: "batch", 1: "sequence"}
229
+ return OrderedDict(
230
+ [
231
+ ("input_ids", dynamic_axis),
232
+ ("attention_mask", dynamic_axis),
233
+ ]
234
+ )
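Because of the `attribute_map` defined above, the generic configuration names resolve to the Flaubert-specific attributes. A short sketch, assuming only that this package is importable:

# Hedged sketch: the generic names are aliases for the Flaubert-specific ones.
from transformers.models.flaubert import FlaubertConfig, FlaubertOnnxConfig

config = FlaubertConfig(emb_dim=512, n_layers=6, n_heads=8)
assert config.hidden_size == config.emb_dim == 512
assert config.num_hidden_layers == config.n_layers == 6
assert config.num_attention_heads == config.n_heads == 8

# The ONNX export helper exposes dynamic axes for the two standard inputs.
print(dict(FlaubertOnnxConfig.from_model_config(config).inputs))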
venv/lib/python3.10/site-packages/transformers/models/flaubert/modeling_flaubert.py ADDED
@@ -0,0 +1,1302 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present CNRS, Facebook Inc. and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Flaubert model, based on XLM."""
16
+
17
+ import itertools
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Dict, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import torch
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import gelu
28
+ from ...modeling_outputs import (
29
+ BaseModelOutput,
30
+ MaskedLMOutput,
31
+ MultipleChoiceModelOutput,
32
+ QuestionAnsweringModelOutput,
33
+ SequenceClassifierOutput,
34
+ TokenClassifierOutput,
35
+ )
36
+ from ...modeling_utils import PreTrainedModel, SequenceSummary, SQuADHead
37
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
38
+ from ...utils import (
39
+ ModelOutput,
40
+ add_code_sample_docstrings,
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ logging,
44
+ replace_return_docstrings,
45
+ )
46
+ from .configuration_flaubert import FlaubertConfig
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+ _CHECKPOINT_FOR_DOC = "flaubert/flaubert_base_cased"
52
+ _CONFIG_FOR_DOC = "FlaubertConfig"
53
+
54
+
55
+ from ..deprecated._archive_maps import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
56
+
57
+
58
+ # Copied from transformers.models.xlm.modeling_xlm.create_sinusoidal_embeddings
59
+ def create_sinusoidal_embeddings(n_pos, dim, out):
60
+ position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
61
+ out.requires_grad = False
62
+ out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
63
+ out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
64
+ out.detach_()
65
+
66
+
67
+ # Copied from transformers.models.xlm.modeling_xlm.get_masks
68
+ def get_masks(slen, lengths, causal, padding_mask=None):
69
+ """
70
+ Generate hidden states mask, and optionally an attention mask.
71
+ """
72
+ alen = torch.arange(slen, dtype=torch.long, device=lengths.device)
73
+ if padding_mask is not None:
74
+ mask = padding_mask
75
+ else:
76
+ assert lengths.max().item() <= slen
77
+ mask = alen < lengths[:, None]
78
+
79
+ # attention mask is the same as mask, or triangular inferior attention (causal)
80
+ bs = lengths.size(0)
81
+ if causal:
82
+ attn_mask = alen[None, None, :].repeat(bs, slen, 1) <= alen[None, :, None]
83
+ else:
84
+ attn_mask = mask
85
+
86
+ # sanity check
87
+ assert mask.size() == (bs, slen)
88
+ assert causal is False or attn_mask.size() == (bs, slen, slen)
89
+
90
+ return mask, attn_mask
91
+
92
+
93
+ # Copied from transformers.models.xlm.modeling_xlm.MultiHeadAttention
94
+ class MultiHeadAttention(nn.Module):
95
+ NEW_ID = itertools.count()
96
+
97
+ def __init__(self, n_heads, dim, config):
98
+ super().__init__()
99
+ self.layer_id = next(MultiHeadAttention.NEW_ID)
100
+ self.dim = dim
101
+ self.n_heads = n_heads
102
+ self.dropout = config.attention_dropout
103
+ assert self.dim % self.n_heads == 0
104
+
105
+ self.q_lin = nn.Linear(dim, dim)
106
+ self.k_lin = nn.Linear(dim, dim)
107
+ self.v_lin = nn.Linear(dim, dim)
108
+ self.out_lin = nn.Linear(dim, dim)
109
+ self.pruned_heads = set()
110
+
111
+ def prune_heads(self, heads):
112
+ attention_head_size = self.dim // self.n_heads
113
+ if len(heads) == 0:
114
+ return
115
+ heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, attention_head_size, self.pruned_heads)
116
+ # Prune linear layers
117
+ self.q_lin = prune_linear_layer(self.q_lin, index)
118
+ self.k_lin = prune_linear_layer(self.k_lin, index)
119
+ self.v_lin = prune_linear_layer(self.v_lin, index)
120
+ self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
121
+ # Update hyper params
122
+ self.n_heads = self.n_heads - len(heads)
123
+ self.dim = attention_head_size * self.n_heads
124
+ self.pruned_heads = self.pruned_heads.union(heads)
125
+
126
+ def forward(self, input, mask, kv=None, cache=None, head_mask=None, output_attentions=False):
127
+ """
128
+ Self-attention (if kv is None) or attention over source sentence (provided by kv).
129
+ """
130
+ # Input is (bs, qlen, dim)
131
+ # Mask is (bs, klen) (non-causal) or (bs, klen, klen)
132
+ bs, qlen, dim = input.size()
133
+ if kv is None:
134
+ klen = qlen if cache is None else cache["slen"] + qlen
135
+ else:
136
+ klen = kv.size(1)
137
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
138
+ n_heads = self.n_heads
139
+ dim_per_head = self.dim // n_heads
140
+ mask_reshape = (bs, 1, qlen, klen) if mask.dim() == 3 else (bs, 1, 1, klen)
141
+
142
+ def shape(x):
143
+ """projection"""
144
+ return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
145
+
146
+ def unshape(x):
147
+ """compute context"""
148
+ return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
149
+
150
+ q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head)
151
+ if kv is None:
152
+ k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head)
153
+ v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head)
154
+ elif cache is None or self.layer_id not in cache:
155
+ k = v = kv
156
+ k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head)
157
+ v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head)
158
+
159
+ if cache is not None:
160
+ if self.layer_id in cache:
161
+ if kv is None:
162
+ k_, v_ = cache[self.layer_id]
163
+ k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)
164
+ v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)
165
+ else:
166
+ k, v = cache[self.layer_id]
167
+ cache[self.layer_id] = (k, v)
168
+
169
+ q = q / math.sqrt(dim_per_head) # (bs, n_heads, qlen, dim_per_head)
170
+ scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, qlen, klen)
171
+ mask = (mask == 0).view(mask_reshape).expand_as(scores) # (bs, n_heads, qlen, klen)
172
+ scores.masked_fill_(mask, torch.finfo(scores.dtype).min) # (bs, n_heads, qlen, klen)
173
+
174
+ weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)
175
+ weights = nn.functional.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen)
176
+
177
+ # Mask heads if we want to
178
+ if head_mask is not None:
179
+ weights = weights * head_mask
180
+
181
+ context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
182
+ context = unshape(context) # (bs, qlen, dim)
183
+
184
+ outputs = (self.out_lin(context),)
185
+ if output_attentions:
186
+ outputs = outputs + (weights,)
187
+ return outputs
188
+
189
+
190
+ # Copied from transformers.models.xlm.modeling_xlm.TransformerFFN
191
+ class TransformerFFN(nn.Module):
192
+ def __init__(self, in_dim, dim_hidden, out_dim, config):
193
+ super().__init__()
194
+ self.dropout = config.dropout
195
+ self.lin1 = nn.Linear(in_dim, dim_hidden)
196
+ self.lin2 = nn.Linear(dim_hidden, out_dim)
197
+ self.act = gelu if config.gelu_activation else nn.functional.relu
198
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
199
+ self.seq_len_dim = 1
200
+
201
+ def forward(self, input):
202
+ return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input)
203
+
204
+ def ff_chunk(self, input):
205
+ x = self.lin1(input)
206
+ x = self.act(x)
207
+ x = self.lin2(x)
208
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
209
+ return x
210
+
211
+
212
+ FLAUBERT_START_DOCSTRING = r"""
213
+
214
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
215
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
216
+ etc.)
217
+
218
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
219
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
220
+ and behavior.
221
+
222
+ Parameters:
223
+ config ([`FlaubertConfig`]): Model configuration class with all the parameters of the model.
224
+ Initializing with a config file does not load the weights associated with the model, only the
225
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
226
+ """
227
+
228
+ FLAUBERT_INPUTS_DOCSTRING = r"""
229
+ Args:
230
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
231
+ Indices of input sequence tokens in the vocabulary.
232
+
233
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
234
+ [`PreTrainedTokenizer.__call__`] for details.
235
+
236
+ [What are input IDs?](../glossary#input-ids)
237
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
238
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
239
+
240
+ - 1 for tokens that are **not masked**,
241
+ - 0 for tokens that are **masked**.
242
+
243
+ [What are attention masks?](../glossary#attention-mask)
244
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
245
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
246
+ 1]`:
247
+
248
+ - 0 corresponds to a *sentence A* token,
249
+ - 1 corresponds to a *sentence B* token.
250
+
251
+ [What are token type IDs?](../glossary#token-type-ids)
252
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
253
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
254
+ config.max_position_embeddings - 1]`.
255
+
256
+ [What are position IDs?](../glossary#position-ids)
257
+ lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
258
+ Length of each sentence that can be used to avoid performing attention on padding token indices. You can
259
+ also use `attention_mask` for the same result (see above), kept here for compatibility. Indices selected in
260
+ `[0, ..., input_ids.size(-1)]`:
261
+ cache (`Dict[str, torch.FloatTensor]`, *optional*):
262
+ Dictionary strings to `torch.FloatTensor` that contains precomputed hidden-states (key and values in the
263
+ attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential
264
+ decoding. The dictionary object will be modified in-place during the forward pass to add newly computed
265
+ hidden-states.
266
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
267
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
268
+
269
+ - 1 indicates the head is **not masked**,
270
+ - 0 indicates the head is **masked**.
271
+
272
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
273
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
274
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
275
+ model's internal embedding lookup matrix.
276
+ output_attentions (`bool`, *optional*):
277
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
278
+ tensors for more detail.
279
+ output_hidden_states (`bool`, *optional*):
280
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
281
+ more detail.
282
+ return_dict (`bool`, *optional*):
283
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
284
+ """
285
+
286
+
287
+ @add_start_docstrings(
288
+ "The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top.",
289
+ FLAUBERT_START_DOCSTRING,
290
+ )
291
+ # Copied from transformers.models.xlm.modeling_xlm.XLMPredLayer with XLM->Flaubert
292
+ class FlaubertPredLayer(nn.Module):
293
+ """
294
+ Prediction layer (cross_entropy or adaptive_softmax).
295
+ """
296
+
297
+ def __init__(self, config):
298
+ super().__init__()
299
+ self.asm = config.asm
300
+ self.n_words = config.n_words
301
+ self.pad_index = config.pad_index
302
+ dim = config.emb_dim
303
+
304
+ if config.asm is False:
305
+ self.proj = nn.Linear(dim, config.n_words, bias=True)
306
+ else:
307
+ self.proj = nn.AdaptiveLogSoftmaxWithLoss(
308
+ in_features=dim,
309
+ n_classes=config.n_words,
310
+ cutoffs=config.asm_cutoffs,
311
+ div_value=config.asm_div_value,
312
+ head_bias=True, # default is False
313
+ )
314
+
315
+ def forward(self, x, y=None):
316
+ """Compute the loss, and optionally the scores."""
317
+ outputs = ()
318
+ if self.asm is False:
319
+ scores = self.proj(x)
320
+ outputs = (scores,) + outputs
321
+ if y is not None:
322
+ loss = nn.functional.cross_entropy(scores.view(-1, self.n_words), y.view(-1), reduction="mean")
323
+ outputs = (loss,) + outputs
324
+ else:
325
+ scores = self.proj.log_prob(x)
326
+ outputs = (scores,) + outputs
327
+ if y is not None:
328
+ _, loss = self.proj(x, y)
329
+ outputs = (loss,) + outputs
330
+
331
+ return outputs
332
+
333
+
334
+ # Copied from transformers.models.xlm.modeling_xlm.XLMPreTrainedModel with XLM->Flaubert
335
+ class FlaubertPreTrainedModel(PreTrainedModel):
336
+ """
337
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
338
+ models.
339
+ """
340
+
341
+ config_class = FlaubertConfig
342
+ load_tf_weights = None
343
+ base_model_prefix = "transformer"
344
+
345
+ def __init__(self, *inputs, **kwargs):
346
+ super().__init__(*inputs, **kwargs)
347
+
348
+ @property
349
+ def dummy_inputs(self):
350
+ inputs_list = torch.tensor([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])
351
+ attns_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
352
+ if self.config.use_lang_emb and self.config.n_langs > 1:
353
+ langs_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
354
+ else:
355
+ langs_list = None
356
+ return {"input_ids": inputs_list, "attention_mask": attns_list, "langs": langs_list}
357
+
358
+ def _init_weights(self, module):
359
+ """Initialize the weights."""
360
+ if isinstance(module, nn.Embedding):
361
+ if self.config is not None and self.config.embed_init_std is not None:
362
+ nn.init.normal_(module.weight, mean=0, std=self.config.embed_init_std)
363
+ if module.padding_idx is not None:
364
+ module.weight.data[module.padding_idx].zero_()
365
+ if isinstance(module, nn.Linear):
366
+ if self.config is not None and self.config.init_std is not None:
367
+ nn.init.normal_(module.weight, mean=0, std=self.config.init_std)
368
+ if module.bias is not None:
369
+ nn.init.constant_(module.bias, 0.0)
370
+ if isinstance(module, nn.LayerNorm):
371
+ module.bias.data.zero_()
372
+ module.weight.data.fill_(1.0)
373
+ if isinstance(module, FlaubertModel) and self.config.sinusoidal_embeddings:
374
+ create_sinusoidal_embeddings(
375
+ self.config.max_position_embeddings, self.config.emb_dim, out=module.position_embeddings.weight
376
+ )
377
+
378
+
379
+ class FlaubertModel(FlaubertPreTrainedModel):
380
+ def __init__(self, config): # , dico, is_encoder, with_output):
381
+ super().__init__(config)
382
+
383
+ # encoder / decoder, output layer
384
+ self.is_encoder = config.is_encoder
385
+ self.is_decoder = not config.is_encoder
386
+ if self.is_decoder:
387
+ raise NotImplementedError("Currently Flaubert can only be used as an encoder")
388
+ # self.with_output = with_output
389
+ self.causal = config.causal
390
+
391
+ # dictionary / languages
392
+ self.n_langs = config.n_langs
393
+ self.use_lang_emb = config.use_lang_emb
394
+ self.n_words = config.n_words
395
+ self.eos_index = config.eos_index
396
+ self.pad_index = config.pad_index
397
+ # self.dico = dico
398
+ # self.id2lang = config.id2lang
399
+ # self.lang2id = config.lang2id
400
+ # assert len(self.dico) == self.n_words
401
+ # assert len(self.id2lang) == len(self.lang2id) == self.n_langs
402
+
403
+ # model parameters
404
+ self.dim = config.emb_dim # 512 by default
405
+ self.hidden_dim = self.dim * 4 # 2048 by default
406
+ self.n_heads = config.n_heads # 8 by default
407
+ self.n_layers = config.n_layers
408
+ self.dropout = config.dropout
409
+ self.attention_dropout = config.attention_dropout
410
+ assert self.dim % self.n_heads == 0, "transformer dim must be a multiple of n_heads"
411
+
412
+ # embeddings
413
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.dim)
414
+ if config.n_langs > 1 and config.use_lang_emb:
415
+ self.lang_embeddings = nn.Embedding(self.n_langs, self.dim)
416
+ self.embeddings = nn.Embedding(self.n_words, self.dim, padding_idx=self.pad_index)
417
+ self.layer_norm_emb = nn.LayerNorm(self.dim, eps=config.layer_norm_eps)
418
+
419
+ # transformer layers
420
+ self.attentions = nn.ModuleList()
421
+ self.layer_norm1 = nn.ModuleList()
422
+ self.ffns = nn.ModuleList()
423
+ self.layer_norm2 = nn.ModuleList()
424
+ # if self.is_decoder:
425
+ # self.layer_norm15 = nn.ModuleList()
426
+ # self.encoder_attn = nn.ModuleList()
427
+
428
+ for _ in range(self.n_layers):
429
+ self.attentions.append(MultiHeadAttention(self.n_heads, self.dim, config=config))
430
+ self.layer_norm1.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
431
+ # if self.is_decoder:
432
+ # self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
433
+ # self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))
434
+ self.ffns.append(TransformerFFN(self.dim, self.hidden_dim, self.dim, config=config))
435
+ self.layer_norm2.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
436
+
437
+ if hasattr(config, "pruned_heads"):
438
+ pruned_heads = config.pruned_heads.copy().items()
439
+ config.pruned_heads = {}
440
+ for layer, heads in pruned_heads:
441
+ if self.attentions[int(layer)].n_heads == config.n_heads:
442
+ self.prune_heads({int(layer): list(map(int, heads))})
443
+
444
+ # Initialize weights and apply final processing
445
+ self.post_init()
446
+
447
+ self.layerdrop = getattr(config, "layerdrop", 0.0)
448
+ self.pre_norm = getattr(config, "pre_norm", False)
449
+ self.register_buffer(
450
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
451
+ )
452
+
453
+ # Copied from transformers.models.xlm.modeling_xlm.XLMModel.get_input_embeddings
454
+ def get_input_embeddings(self):
455
+ return self.embeddings
456
+
457
+ # Copied from transformers.models.xlm.modeling_xlm.XLMModel.set_input_embeddings
458
+ def set_input_embeddings(self, new_embeddings):
459
+ self.embeddings = new_embeddings
460
+
461
+ # Copied from transformers.models.xlm.modeling_xlm.XLMModel._prune_heads
462
+ def _prune_heads(self, heads_to_prune):
463
+ """
464
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
465
+ class PreTrainedModel
466
+ """
467
+ for layer, heads in heads_to_prune.items():
468
+ self.attentions[layer].prune_heads(heads)
469
+
470
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING)
471
+ @add_code_sample_docstrings(
472
+ checkpoint=_CHECKPOINT_FOR_DOC,
473
+ output_type=BaseModelOutput,
474
+ config_class=_CONFIG_FOR_DOC,
475
+ )
476
+ def forward(
477
+ self,
478
+ input_ids: Optional[torch.LongTensor] = None,
479
+ attention_mask: Optional[torch.FloatTensor] = None,
480
+ langs: Optional[torch.Tensor] = None,
481
+ token_type_ids: Optional[torch.LongTensor] = None,
482
+ position_ids: Optional[torch.LongTensor] = None,
483
+ lengths: Optional[torch.LongTensor] = None,
484
+ cache: Optional[Dict[str, torch.FloatTensor]] = None,
485
+ head_mask: Optional[torch.FloatTensor] = None,
486
+ inputs_embeds: Optional[torch.FloatTensor] = None,
487
+ output_attentions: Optional[bool] = None,
488
+ output_hidden_states: Optional[bool] = None,
489
+ return_dict: Optional[bool] = None,
490
+ ) -> Union[Tuple, BaseModelOutput]:
491
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
492
+ output_hidden_states = (
493
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
494
+ )
495
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
496
+
497
+ # removed: src_enc=None, src_len=None
498
+ if input_ids is not None:
499
+ bs, slen = input_ids.size()
500
+ else:
501
+ bs, slen = inputs_embeds.size()[:-1]
502
+
503
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
504
+
505
+ if lengths is None:
506
+ if input_ids is not None:
507
+ lengths = (input_ids != self.pad_index).sum(dim=1).long()
508
+ else:
509
+ lengths = torch.tensor([slen] * bs, device=device)
510
+ # mask = input_ids != self.pad_index
511
+
512
+ # check inputs
513
+ assert lengths.size(0) == bs
514
+ assert lengths.max().item() <= slen
515
+ # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0
516
+ # assert (src_enc is None) == (src_len is None)
517
+ # if src_enc is not None:
518
+ # assert self.is_decoder
519
+ # assert src_enc.size(0) == bs
520
+
521
+ # generate masks
522
+ mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)
523
+ # if self.is_decoder and src_enc is not None:
524
+ # src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]
525
+
526
+ # Setting the position_ids to the buffer registered in the constructor helps
527
+ # when tracing the model without passing position_ids, and solves
528
+ # issues similar to issue #5664
529
+ if position_ids is None:
530
+ if hasattr(self, "position_ids"):
531
+ position_ids = self.position_ids[:, :slen]
532
+ position_ids = position_ids.expand((bs, slen))
533
+ else:
534
+ position_ids = torch.arange(slen, dtype=torch.long, device=device)
535
+ position_ids = position_ids.unsqueeze(0).expand((bs, slen))
536
+ else:
537
+ assert position_ids.size() == (bs, slen) # (slen, bs)
538
+ # position_ids = position_ids.transpose(0, 1)
539
+
540
+ # langs
541
+ if langs is not None:
542
+ assert langs.size() == (bs, slen) # (slen, bs)
543
+ # langs = langs.transpose(0, 1)
544
+
545
+ # Prepare head mask if needed
546
+ head_mask = self.get_head_mask(head_mask, self.config.n_layers)
547
+
548
+ # do not recompute cached elements
549
+ if cache is not None and input_ids is not None:
550
+ _slen = slen - cache["slen"]
551
+ input_ids = input_ids[:, -_slen:]
552
+ position_ids = position_ids[:, -_slen:]
553
+ if langs is not None:
554
+ langs = langs[:, -_slen:]
555
+ mask = mask[:, -_slen:]
556
+ attn_mask = attn_mask[:, -_slen:]
557
+
558
+ # embeddings
559
+ if inputs_embeds is None:
560
+ inputs_embeds = self.embeddings(input_ids)
561
+
562
+ tensor = inputs_embeds + self.position_embeddings(position_ids).expand_as(inputs_embeds)
563
+ if langs is not None and self.use_lang_emb and self.config.n_langs > 1:
564
+ tensor = tensor + self.lang_embeddings(langs)
565
+ if token_type_ids is not None:
566
+ tensor = tensor + self.embeddings(token_type_ids)
567
+ tensor = self.layer_norm_emb(tensor)
568
+ tensor = nn.functional.dropout(tensor, p=self.dropout, training=self.training)
569
+ tensor *= mask.unsqueeze(-1).to(tensor.dtype)
570
+
571
+ # transformer layers
572
+ hidden_states = () if output_hidden_states else None
573
+ attentions = () if output_attentions else None
574
+ for i in range(self.n_layers):
575
+ # LayerDrop
576
+ if self.training:
577
+ dropout_probability = torch.rand([])
578
+ if dropout_probability < self.layerdrop:
579
+ continue
580
+
581
+ if output_hidden_states:
582
+ hidden_states = hidden_states + (tensor,)
583
+
584
+ # self attention
585
+ if not self.pre_norm:
586
+ attn_outputs = self.attentions[i](
587
+ tensor,
588
+ attn_mask,
589
+ cache=cache,
590
+ head_mask=head_mask[i],
591
+ output_attentions=output_attentions,
592
+ )
593
+ attn = attn_outputs[0]
594
+ if output_attentions:
595
+ attentions = attentions + (attn_outputs[1],)
596
+ attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
597
+ tensor = tensor + attn
598
+ tensor = self.layer_norm1[i](tensor)
599
+ else:
600
+ tensor_normalized = self.layer_norm1[i](tensor)
601
+ attn_outputs = self.attentions[i](tensor_normalized, attn_mask, cache=cache, head_mask=head_mask[i])
602
+ attn = attn_outputs[0]
603
+ if output_attentions:
604
+ attentions = attentions + (attn_outputs[1],)
605
+ attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
606
+ tensor = tensor + attn
607
+
608
+ # encoder attention (for decoder only)
609
+ # if self.is_decoder and src_enc is not None:
610
+ # attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)
611
+ # attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
612
+ # tensor = tensor + attn
613
+ # tensor = self.layer_norm15[i](tensor)
614
+
615
+ # FFN
616
+ if not self.pre_norm:
617
+ tensor = tensor + self.ffns[i](tensor)
618
+ tensor = self.layer_norm2[i](tensor)
619
+ else:
620
+ tensor_normalized = self.layer_norm2[i](tensor)
621
+ tensor = tensor + self.ffns[i](tensor_normalized)
622
+
623
+ tensor *= mask.unsqueeze(-1).to(tensor.dtype)
624
+
625
+ # Add last hidden state
626
+ if output_hidden_states:
627
+ hidden_states = hidden_states + (tensor,)
628
+
629
+ # update cache length
630
+ if cache is not None:
631
+ cache["slen"] += tensor.size(1)
632
+
633
+ # move back sequence length to dimension 0
634
+ # tensor = tensor.transpose(0, 1)
635
+
636
+ if not return_dict:
637
+ return tuple(v for v in [tensor, hidden_states, attentions] if v is not None)
638
+
639
+ return BaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions)
640
+
641
+
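# --- Editor's note: illustrative sketch, not part of the diffed file above. ---
# It shows how FlaubertModel.forward derives `lengths` and the hidden-state mask when
# only `input_ids` is passed; `pad_index` below is a placeholder standing in for
# `config.pad_index`.
import torch

pad_index = 2  # placeholder value; the real one comes from FlaubertConfig.pad_index
input_ids = torch.tensor(
    [[5, 7, 9, pad_index, pad_index],
     [4, 6, pad_index, pad_index, pad_index]]
)

# number of non-padding tokens per sequence, as computed in forward()
lengths = (input_ids != pad_index).sum(dim=1).long()   # tensor([3, 2])

# the per-token mask that get_masks() builds from `lengths`
slen = input_ids.size(1)
mask = torch.arange(slen)[None, :] < lengths[:, None]  # shape (bs, slen), bool
print(lengths)
print(mask)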
642
+ @add_start_docstrings(
643
+ """
644
+ The Flaubert Model transformer with a language modeling head on top (linear layer with weights tied to the input
645
+ embeddings).
646
+ """,
647
+ FLAUBERT_START_DOCSTRING,
648
+ )
649
+ # Copied from transformers.models.xlm.modeling_xlm.XLMWithLMHeadModel with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
650
+ class FlaubertWithLMHeadModel(FlaubertPreTrainedModel):
651
+ _tied_weights_keys = ["pred_layer.proj.weight"]
652
+
653
+ def __init__(self, config):
654
+ super().__init__(config)
655
+ self.transformer = FlaubertModel(config)
656
+ self.pred_layer = FlaubertPredLayer(config)
657
+
658
+ # Initialize weights and apply final processing
659
+ self.post_init()
660
+
661
+ def get_output_embeddings(self):
662
+ return self.pred_layer.proj
663
+
664
+ def set_output_embeddings(self, new_embeddings):
665
+ self.pred_layer.proj = new_embeddings
666
+
667
+ def prepare_inputs_for_generation(self, input_ids, **kwargs):
668
+ mask_token_id = self.config.mask_token_id
669
+ lang_id = self.config.lang_id
670
+
671
+ effective_batch_size = input_ids.shape[0]
672
+ mask_token = torch.full((effective_batch_size, 1), mask_token_id, dtype=torch.long, device=input_ids.device)
673
+ input_ids = torch.cat([input_ids, mask_token], dim=1)
674
+ if lang_id is not None:
675
+ langs = torch.full_like(input_ids, lang_id)
676
+ else:
677
+ langs = None
678
+ return {"input_ids": input_ids, "langs": langs}
679
+
680
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
681
+ @add_code_sample_docstrings(
682
+ checkpoint=_CHECKPOINT_FOR_DOC,
683
+ output_type=MaskedLMOutput,
684
+ config_class=_CONFIG_FOR_DOC,
685
+ mask="<special1>",
686
+ )
687
+ def forward(
688
+ self,
689
+ input_ids: Optional[torch.Tensor] = None,
690
+ attention_mask: Optional[torch.Tensor] = None,
691
+ langs: Optional[torch.Tensor] = None,
692
+ token_type_ids: Optional[torch.Tensor] = None,
693
+ position_ids: Optional[torch.Tensor] = None,
694
+ lengths: Optional[torch.Tensor] = None,
695
+ cache: Optional[Dict[str, torch.Tensor]] = None,
696
+ head_mask: Optional[torch.Tensor] = None,
697
+ inputs_embeds: Optional[torch.Tensor] = None,
698
+ labels: Optional[torch.Tensor] = None,
699
+ output_attentions: Optional[bool] = None,
700
+ output_hidden_states: Optional[bool] = None,
701
+ return_dict: Optional[bool] = None,
702
+ ) -> Union[Tuple, MaskedLMOutput]:
703
+ r"""
704
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
705
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
706
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
707
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
708
+ """
709
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
710
+
711
+ transformer_outputs = self.transformer(
712
+ input_ids,
713
+ attention_mask=attention_mask,
714
+ langs=langs,
715
+ token_type_ids=token_type_ids,
716
+ position_ids=position_ids,
717
+ lengths=lengths,
718
+ cache=cache,
719
+ head_mask=head_mask,
720
+ inputs_embeds=inputs_embeds,
721
+ output_attentions=output_attentions,
722
+ output_hidden_states=output_hidden_states,
723
+ return_dict=return_dict,
724
+ )
725
+
726
+ output = transformer_outputs[0]
727
+ outputs = self.pred_layer(output, labels) # (loss, logits) or (logits,) depending on if labels are provided.
728
+
729
+ if not return_dict:
730
+ return outputs + transformer_outputs[1:]
731
+
732
+ return MaskedLMOutput(
733
+ loss=outputs[0] if labels is not None else None,
734
+ logits=outputs[0] if labels is None else outputs[1],
735
+ hidden_states=transformer_outputs.hidden_states,
736
+ attentions=transformer_outputs.attentions,
737
+ )
738
+
739
+
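# --- Editor's note: hedged usage sketch, not part of the diffed file above. ---
# It assumes the public checkpoint "flaubert/flaubert_base_cased" and that the
# tokenizer's mask token is "<special1>", as the docstring sample above suggests;
# both are assumptions, not guarantees.
import torch
from transformers import FlaubertTokenizer, FlaubertWithLMHeadModel

tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
model = FlaubertWithLMHeadModel.from_pretrained("flaubert/flaubert_base_cased")

inputs = tokenizer("Le camembert est <special1> !", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (batch, seq_len, vocab_size)

# pick the most likely token at the masked position
mask_positions = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
predicted_ids = logits[0, mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_ids))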
740
+ @add_start_docstrings(
741
+ """
742
+ Flaubert Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)
743
+ e.g. for GLUE tasks.
744
+ """,
745
+ FLAUBERT_START_DOCSTRING,
746
+ )
747
+ # Copied from transformers.models.xlm.modeling_xlm.XLMForSequenceClassification with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
748
+ class FlaubertForSequenceClassification(FlaubertPreTrainedModel):
749
+ def __init__(self, config):
750
+ super().__init__(config)
751
+ self.num_labels = config.num_labels
752
+ self.config = config
753
+
754
+ self.transformer = FlaubertModel(config)
755
+ self.sequence_summary = SequenceSummary(config)
756
+
757
+ # Initialize weights and apply final processing
758
+ self.post_init()
759
+
760
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
761
+ @add_code_sample_docstrings(
762
+ checkpoint=_CHECKPOINT_FOR_DOC,
763
+ output_type=SequenceClassifierOutput,
764
+ config_class=_CONFIG_FOR_DOC,
765
+ )
766
+ def forward(
767
+ self,
768
+ input_ids: Optional[torch.Tensor] = None,
769
+ attention_mask: Optional[torch.Tensor] = None,
770
+ langs: Optional[torch.Tensor] = None,
771
+ token_type_ids: Optional[torch.Tensor] = None,
772
+ position_ids: Optional[torch.Tensor] = None,
773
+ lengths: Optional[torch.Tensor] = None,
774
+ cache: Optional[Dict[str, torch.Tensor]] = None,
775
+ head_mask: Optional[torch.Tensor] = None,
776
+ inputs_embeds: Optional[torch.Tensor] = None,
777
+ labels: Optional[torch.Tensor] = None,
778
+ output_attentions: Optional[bool] = None,
779
+ output_hidden_states: Optional[bool] = None,
780
+ return_dict: Optional[bool] = None,
781
+ ) -> Union[Tuple, SequenceClassifierOutput]:
782
+ r"""
783
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
784
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
785
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
786
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
787
+ """
788
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
789
+
790
+ transformer_outputs = self.transformer(
791
+ input_ids,
792
+ attention_mask=attention_mask,
793
+ langs=langs,
794
+ token_type_ids=token_type_ids,
795
+ position_ids=position_ids,
796
+ lengths=lengths,
797
+ cache=cache,
798
+ head_mask=head_mask,
799
+ inputs_embeds=inputs_embeds,
800
+ output_attentions=output_attentions,
801
+ output_hidden_states=output_hidden_states,
802
+ return_dict=return_dict,
803
+ )
804
+
805
+ output = transformer_outputs[0]
806
+ logits = self.sequence_summary(output)
807
+
808
+ loss = None
809
+ if labels is not None:
810
+ if self.config.problem_type is None:
811
+ if self.num_labels == 1:
812
+ self.config.problem_type = "regression"
813
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
814
+ self.config.problem_type = "single_label_classification"
815
+ else:
816
+ self.config.problem_type = "multi_label_classification"
817
+
818
+ if self.config.problem_type == "regression":
819
+ loss_fct = MSELoss()
820
+ if self.num_labels == 1:
821
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
822
+ else:
823
+ loss = loss_fct(logits, labels)
824
+ elif self.config.problem_type == "single_label_classification":
825
+ loss_fct = CrossEntropyLoss()
826
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
827
+ elif self.config.problem_type == "multi_label_classification":
828
+ loss_fct = BCEWithLogitsLoss()
829
+ loss = loss_fct(logits, labels)
830
+
831
+ if not return_dict:
832
+ output = (logits,) + transformer_outputs[1:]
833
+ return ((loss,) + output) if loss is not None else output
834
+
835
+ return SequenceClassifierOutput(
836
+ loss=loss,
837
+ logits=logits,
838
+ hidden_states=transformer_outputs.hidden_states,
839
+ attentions=transformer_outputs.attentions,
840
+ )
841
+
842
+
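# --- Editor's note: illustrative sketch, not part of the diffed file above. ---
# It restates the problem_type dispatch from FlaubertForSequenceClassification.forward
# as a standalone helper, so the three loss branches are easy to compare side by side.
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

def classification_loss(logits, labels, num_labels, problem_type=None):
    # infer the problem type the same way the forward pass above does
    if problem_type is None:
        if num_labels == 1:
            problem_type = "regression"
        elif labels.dtype in (torch.long, torch.int):
            problem_type = "single_label_classification"
        else:
            problem_type = "multi_label_classification"

    if problem_type == "regression":
        return MSELoss()(logits.squeeze(), labels.squeeze())
    if problem_type == "single_label_classification":
        return CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
    return BCEWithLogitsLoss()(logits, labels)

# single-label example: 2 samples, 3 classes
print(classification_loss(torch.randn(2, 3), torch.tensor([0, 2]), num_labels=3))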
843
+ @add_start_docstrings(
844
+ """
845
+ Flaubert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
846
+ Named-Entity-Recognition (NER) tasks.
847
+ """,
848
+ FLAUBERT_START_DOCSTRING,
849
+ )
850
+ # Copied from transformers.models.xlm.modeling_xlm.XLMForTokenClassification with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
851
+ class FlaubertForTokenClassification(FlaubertPreTrainedModel):
852
+ def __init__(self, config):
853
+ super().__init__(config)
854
+ self.num_labels = config.num_labels
855
+
856
+ self.transformer = FlaubertModel(config)
857
+ self.dropout = nn.Dropout(config.dropout)
858
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
859
+
860
+ # Initialize weights and apply final processing
861
+ self.post_init()
862
+
863
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
864
+ @add_code_sample_docstrings(
865
+ checkpoint=_CHECKPOINT_FOR_DOC,
866
+ output_type=TokenClassifierOutput,
867
+ config_class=_CONFIG_FOR_DOC,
868
+ )
869
+ def forward(
870
+ self,
871
+ input_ids: Optional[torch.Tensor] = None,
872
+ attention_mask: Optional[torch.Tensor] = None,
873
+ langs: Optional[torch.Tensor] = None,
874
+ token_type_ids: Optional[torch.Tensor] = None,
875
+ position_ids: Optional[torch.Tensor] = None,
876
+ lengths: Optional[torch.Tensor] = None,
877
+ cache: Optional[Dict[str, torch.Tensor]] = None,
878
+ head_mask: Optional[torch.Tensor] = None,
879
+ inputs_embeds: Optional[torch.Tensor] = None,
880
+ labels: Optional[torch.Tensor] = None,
881
+ output_attentions: Optional[bool] = None,
882
+ output_hidden_states: Optional[bool] = None,
883
+ return_dict: Optional[bool] = None,
884
+ ) -> Union[Tuple, TokenClassifierOutput]:
885
+ r"""
886
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
887
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
888
+ """
889
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
890
+
891
+ outputs = self.transformer(
892
+ input_ids,
893
+ attention_mask=attention_mask,
894
+ langs=langs,
895
+ token_type_ids=token_type_ids,
896
+ position_ids=position_ids,
897
+ lengths=lengths,
898
+ cache=cache,
899
+ head_mask=head_mask,
900
+ inputs_embeds=inputs_embeds,
901
+ output_attentions=output_attentions,
902
+ output_hidden_states=output_hidden_states,
903
+ return_dict=return_dict,
904
+ )
905
+
906
+ sequence_output = outputs[0]
907
+
908
+ sequence_output = self.dropout(sequence_output)
909
+ logits = self.classifier(sequence_output)
910
+
911
+ loss = None
912
+ if labels is not None:
913
+ loss_fct = CrossEntropyLoss()
914
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
915
+
916
+ if not return_dict:
917
+ output = (logits,) + outputs[1:]
918
+ return ((loss,) + output) if loss is not None else output
919
+
920
+ return TokenClassifierOutput(
921
+ loss=loss,
922
+ logits=logits,
923
+ hidden_states=outputs.hidden_states,
924
+ attentions=outputs.attentions,
925
+ )
926
+
927
+
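# --- Editor's note: illustrative sketch, not part of the diffed file above. ---
# It shows the shape handling behind FlaubertForTokenClassification's loss: per-token
# logits are flattened to (batch * seq_len, num_labels) before CrossEntropyLoss, and
# labels equal to -100 (the default ignore_index) are skipped.
import torch
from torch.nn import CrossEntropyLoss

batch, seq_len, num_labels = 2, 4, 5
logits = torch.randn(batch, seq_len, num_labels)
labels = torch.tensor(
    [[1, 2, -100, -100],   # -100 marks padding / ignored positions
     [0, 3, 4, -100]]
)

loss = CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
print(loss)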
928
+ @add_start_docstrings(
929
+ """
930
+ Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
931
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
932
+ """,
933
+ FLAUBERT_START_DOCSTRING,
934
+ )
935
+ # Copied from transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringSimple with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
936
+ class FlaubertForQuestionAnsweringSimple(FlaubertPreTrainedModel):
937
+ def __init__(self, config):
938
+ super().__init__(config)
939
+
940
+ self.transformer = FlaubertModel(config)
941
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
942
+
943
+ # Initialize weights and apply final processing
944
+ self.post_init()
945
+
946
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
947
+ @add_code_sample_docstrings(
948
+ checkpoint=_CHECKPOINT_FOR_DOC,
949
+ output_type=QuestionAnsweringModelOutput,
950
+ config_class=_CONFIG_FOR_DOC,
951
+ )
952
+ def forward(
953
+ self,
954
+ input_ids: Optional[torch.Tensor] = None,
955
+ attention_mask: Optional[torch.Tensor] = None,
956
+ langs: Optional[torch.Tensor] = None,
957
+ token_type_ids: Optional[torch.Tensor] = None,
958
+ position_ids: Optional[torch.Tensor] = None,
959
+ lengths: Optional[torch.Tensor] = None,
960
+ cache: Optional[Dict[str, torch.Tensor]] = None,
961
+ head_mask: Optional[torch.Tensor] = None,
962
+ inputs_embeds: Optional[torch.Tensor] = None,
963
+ start_positions: Optional[torch.Tensor] = None,
964
+ end_positions: Optional[torch.Tensor] = None,
965
+ output_attentions: Optional[bool] = None,
966
+ output_hidden_states: Optional[bool] = None,
967
+ return_dict: Optional[bool] = None,
968
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
969
+ r"""
970
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
971
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
972
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
973
+ are not taken into account for computing the loss.
974
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
975
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
976
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
977
+ are not taken into account for computing the loss.
978
+ """
979
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
980
+
981
+ transformer_outputs = self.transformer(
982
+ input_ids,
983
+ attention_mask=attention_mask,
984
+ langs=langs,
985
+ token_type_ids=token_type_ids,
986
+ position_ids=position_ids,
987
+ lengths=lengths,
988
+ cache=cache,
989
+ head_mask=head_mask,
990
+ inputs_embeds=inputs_embeds,
991
+ output_attentions=output_attentions,
992
+ output_hidden_states=output_hidden_states,
993
+ return_dict=return_dict,
994
+ )
995
+
996
+ sequence_output = transformer_outputs[0]
997
+
998
+ logits = self.qa_outputs(sequence_output)
999
+ start_logits, end_logits = logits.split(1, dim=-1)
1000
+ start_logits = start_logits.squeeze(-1).contiguous()
1001
+ end_logits = end_logits.squeeze(-1).contiguous()
1002
+
1003
+ total_loss = None
1004
+ if start_positions is not None and end_positions is not None:
1005
+ # If we are on multi-GPU, splitting can add an extra dimension; squeeze it
1006
+ if len(start_positions.size()) > 1:
1007
+ start_positions = start_positions.squeeze(-1)
1008
+ if len(end_positions.size()) > 1:
1009
+ end_positions = end_positions.squeeze(-1)
1010
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1011
+ ignored_index = start_logits.size(1)
1012
+ start_positions = start_positions.clamp(0, ignored_index)
1013
+ end_positions = end_positions.clamp(0, ignored_index)
1014
+
1015
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1016
+ start_loss = loss_fct(start_logits, start_positions)
1017
+ end_loss = loss_fct(end_logits, end_positions)
1018
+ total_loss = (start_loss + end_loss) / 2
1019
+
1020
+ if not return_dict:
1021
+ output = (start_logits, end_logits) + transformer_outputs[1:]
1022
+ return ((total_loss,) + output) if total_loss is not None else output
1023
+
1024
+ return QuestionAnsweringModelOutput(
1025
+ loss=total_loss,
1026
+ start_logits=start_logits,
1027
+ end_logits=end_logits,
1028
+ hidden_states=transformer_outputs.hidden_states,
1029
+ attentions=transformer_outputs.attentions,
1030
+ )
1031
+
1032
+
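# --- Editor's note: illustrative sketch, not part of the diffed file above. ---
# It mirrors the span-extraction head of FlaubertForQuestionAnsweringSimple: one Linear
# with two outputs is split into start/end logits, out-of-range gold positions are
# clamped onto an ignored index, and the two cross-entropy losses are averaged.
import torch
from torch import nn
from torch.nn import CrossEntropyLoss

batch, seq_len, hidden = 2, 6, 8
sequence_output = torch.randn(batch, seq_len, hidden)
qa_outputs = nn.Linear(hidden, 2)

start_logits, end_logits = qa_outputs(sequence_output).split(1, dim=-1)
start_logits = start_logits.squeeze(-1)                   # (batch, seq_len)
end_logits = end_logits.squeeze(-1)

start_positions = torch.tensor([1, 9]).clamp(0, seq_len)  # 9 is out of range -> ignored
end_positions = torch.tensor([3, 9]).clamp(0, seq_len)

loss_fct = CrossEntropyLoss(ignore_index=seq_len)
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
print(total_loss)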
1033
+ @add_start_docstrings(
1034
+ """
1035
+ Flaubert Model with a beam-search span classification head on top for extractive question-answering tasks like
1036
+ SQuAD (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1037
+ """,
1038
+ FLAUBERT_START_DOCSTRING,
1039
+ )
1040
+ @dataclass
1041
+ # Copied from transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput with XLM->Flaubert
1042
+ class FlaubertForQuestionAnsweringOutput(ModelOutput):
1043
+ """
1044
+ Base class for outputs of question answering models using a `SquadHead`.
1045
+
1046
+ Args:
1047
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided):
1048
+ Classification loss as the sum of start token, end token (and is_impossible if provided) classification
1049
+ losses.
1050
+ start_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
1051
+ Log probabilities for the top config.start_n_top start token possibilities (beam-search).
1052
+ start_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
1053
+ Indices for the top config.start_n_top start token possibilities (beam-search).
1054
+ end_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
1055
+ Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities
1056
+ (beam-search).
1057
+ end_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
1058
+ Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search).
1059
+ cls_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
1060
+ Log probabilities for the `is_impossible` label of the answers.
1061
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
1062
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
1063
+ shape `(batch_size, sequence_length, hidden_size)`.
1064
+
1065
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
1066
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
1067
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
1068
+ sequence_length)`.
1069
+
1070
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
1071
+ heads.
1072
+ """
1073
+
1074
+ loss: Optional[torch.FloatTensor] = None
1075
+ start_top_log_probs: Optional[torch.FloatTensor] = None
1076
+ start_top_index: Optional[torch.LongTensor] = None
1077
+ end_top_log_probs: Optional[torch.FloatTensor] = None
1078
+ end_top_index: Optional[torch.LongTensor] = None
1079
+ cls_logits: Optional[torch.FloatTensor] = None
1080
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
1081
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
1082
+
1083
+
1084
+ # Copied from transformers.models.xlm.modeling_xlm.XLMForQuestionAnswering with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
1085
+ class FlaubertForQuestionAnswering(FlaubertPreTrainedModel):
1086
+ def __init__(self, config):
1087
+ super().__init__(config)
1088
+
1089
+ self.transformer = FlaubertModel(config)
1090
+ self.qa_outputs = SQuADHead(config)
1091
+
1092
+ # Initialize weights and apply final processing
1093
+ self.post_init()
1094
+
1095
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1096
+ @replace_return_docstrings(output_type=FlaubertForQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC)
1097
+ def forward(
1098
+ self,
1099
+ input_ids: Optional[torch.Tensor] = None,
1100
+ attention_mask: Optional[torch.Tensor] = None,
1101
+ langs: Optional[torch.Tensor] = None,
1102
+ token_type_ids: Optional[torch.Tensor] = None,
1103
+ position_ids: Optional[torch.Tensor] = None,
1104
+ lengths: Optional[torch.Tensor] = None,
1105
+ cache: Optional[Dict[str, torch.Tensor]] = None,
1106
+ head_mask: Optional[torch.Tensor] = None,
1107
+ inputs_embeds: Optional[torch.Tensor] = None,
1108
+ start_positions: Optional[torch.Tensor] = None,
1109
+ end_positions: Optional[torch.Tensor] = None,
1110
+ is_impossible: Optional[torch.Tensor] = None,
1111
+ cls_index: Optional[torch.Tensor] = None,
1112
+ p_mask: Optional[torch.Tensor] = None,
1113
+ output_attentions: Optional[bool] = None,
1114
+ output_hidden_states: Optional[bool] = None,
1115
+ return_dict: Optional[bool] = None,
1116
+ ) -> Union[Tuple, FlaubertForQuestionAnsweringOutput]:
1117
+ r"""
1118
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1119
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1120
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1121
+ are not taken into account for computing the loss.
1122
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1123
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1124
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1125
+ are not taken into account for computing the loss.
1126
+ is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1127
+ Labels for whether a question has an answer or no answer (SQuAD 2.0).
1128
+ cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1129
+ Labels for position (index) of the classification token to use as input for computing plausibility of the
1130
+ answer.
1131
+ p_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1132
+ Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means token should be
1133
+ masked. 0.0 means the token is not masked.
1134
+
1135
+ Returns:
1136
+
1137
+ Example:
1138
+
1139
+ ```python
1140
+ >>> from transformers import XLMTokenizer, XLMForQuestionAnswering
1141
+ >>> import torch
1142
+
1143
+ >>> tokenizer = XLMTokenizer.from_pretrained("FacebookAI/xlm-mlm-en-2048")
1144
+ >>> model = XLMForQuestionAnswering.from_pretrained("FacebookAI/xlm-mlm-en-2048")
1145
+
1146
+ >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(
1147
+ ... 0
1148
+ ... ) # Batch size 1
1149
+ >>> start_positions = torch.tensor([1])
1150
+ >>> end_positions = torch.tensor([3])
1151
+
1152
+ >>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
1153
+ >>> loss = outputs.loss
1154
+ ```"""
1155
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1156
+
1157
+ transformer_outputs = self.transformer(
1158
+ input_ids,
1159
+ attention_mask=attention_mask,
1160
+ langs=langs,
1161
+ token_type_ids=token_type_ids,
1162
+ position_ids=position_ids,
1163
+ lengths=lengths,
1164
+ cache=cache,
1165
+ head_mask=head_mask,
1166
+ inputs_embeds=inputs_embeds,
1167
+ output_attentions=output_attentions,
1168
+ output_hidden_states=output_hidden_states,
1169
+ return_dict=return_dict,
1170
+ )
1171
+
1172
+ output = transformer_outputs[0]
1173
+
1174
+ outputs = self.qa_outputs(
1175
+ output,
1176
+ start_positions=start_positions,
1177
+ end_positions=end_positions,
1178
+ cls_index=cls_index,
1179
+ is_impossible=is_impossible,
1180
+ p_mask=p_mask,
1181
+ return_dict=return_dict,
1182
+ )
1183
+
1184
+ if not return_dict:
1185
+ return outputs + transformer_outputs[1:]
1186
+
1187
+ return FlaubertForQuestionAnsweringOutput(
1188
+ loss=outputs.loss,
1189
+ start_top_log_probs=outputs.start_top_log_probs,
1190
+ start_top_index=outputs.start_top_index,
1191
+ end_top_log_probs=outputs.end_top_log_probs,
1192
+ end_top_index=outputs.end_top_index,
1193
+ cls_logits=outputs.cls_logits,
1194
+ hidden_states=transformer_outputs.hidden_states,
1195
+ attentions=transformer_outputs.attentions,
1196
+ )
1197
+
1198
+
1199
+ @add_start_docstrings(
1200
+ """
1201
+ Flaubert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1202
+ softmax) e.g. for RocStories/SWAG tasks.
1203
+ """,
1204
+ FLAUBERT_START_DOCSTRING,
1205
+ )
1206
+ # Copied from transformers.models.xlm.modeling_xlm.XLMForMultipleChoice with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
1207
+ class FlaubertForMultipleChoice(FlaubertPreTrainedModel):
1208
+ def __init__(self, config, *inputs, **kwargs):
1209
+ super().__init__(config, *inputs, **kwargs)
1210
+
1211
+ self.transformer = FlaubertModel(config)
1212
+ self.sequence_summary = SequenceSummary(config)
1213
+ self.logits_proj = nn.Linear(config.num_labels, 1)
1214
+
1215
+ # Initialize weights and apply final processing
1216
+ self.post_init()
1217
+
1218
+ @add_start_docstrings_to_model_forward(
1219
+ FLAUBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1220
+ )
1221
+ @add_code_sample_docstrings(
1222
+ checkpoint=_CHECKPOINT_FOR_DOC,
1223
+ output_type=MultipleChoiceModelOutput,
1224
+ config_class=_CONFIG_FOR_DOC,
1225
+ )
1226
+ def forward(
1227
+ self,
1228
+ input_ids: Optional[torch.Tensor] = None,
1229
+ attention_mask: Optional[torch.Tensor] = None,
1230
+ langs: Optional[torch.Tensor] = None,
1231
+ token_type_ids: Optional[torch.Tensor] = None,
1232
+ position_ids: Optional[torch.Tensor] = None,
1233
+ lengths: Optional[torch.Tensor] = None,
1234
+ cache: Optional[Dict[str, torch.Tensor]] = None,
1235
+ head_mask: Optional[torch.Tensor] = None,
1236
+ inputs_embeds: Optional[torch.Tensor] = None,
1237
+ labels: Optional[torch.Tensor] = None,
1238
+ output_attentions: Optional[bool] = None,
1239
+ output_hidden_states: Optional[bool] = None,
1240
+ return_dict: Optional[bool] = None,
1241
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
1242
+ r"""
1243
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1244
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1245
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1246
+ `input_ids` above)
1247
+ """
1248
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1249
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1250
+
1251
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1252
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1253
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1254
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1255
+ langs = langs.view(-1, langs.size(-1)) if langs is not None else None
1256
+ inputs_embeds = (
1257
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1258
+ if inputs_embeds is not None
1259
+ else None
1260
+ )
1261
+
1262
+ if lengths is not None:
1263
+ logger.warning(
1264
+ "The `lengths` parameter cannot be used with the Flaubert multiple choice models. Please use the "
1265
+ "attention mask instead."
1266
+ )
1267
+ lengths = None
1268
+
1269
+ transformer_outputs = self.transformer(
1270
+ input_ids=input_ids,
1271
+ attention_mask=attention_mask,
1272
+ langs=langs,
1273
+ token_type_ids=token_type_ids,
1274
+ position_ids=position_ids,
1275
+ lengths=lengths,
1276
+ cache=cache,
1277
+ head_mask=head_mask,
1278
+ inputs_embeds=inputs_embeds,
1279
+ output_attentions=output_attentions,
1280
+ output_hidden_states=output_hidden_states,
1281
+ return_dict=return_dict,
1282
+ )
1283
+ output = transformer_outputs[0]
1284
+ logits = self.sequence_summary(output)
1285
+ logits = self.logits_proj(logits)
1286
+ reshaped_logits = logits.view(-1, num_choices)
1287
+
1288
+ loss = None
1289
+ if labels is not None:
1290
+ loss_fct = CrossEntropyLoss()
1291
+ loss = loss_fct(reshaped_logits, labels)
1292
+
1293
+ if not return_dict:
1294
+ output = (reshaped_logits,) + transformer_outputs[1:]
1295
+ return ((loss,) + output) if loss is not None else output
1296
+
1297
+ return MultipleChoiceModelOutput(
1298
+ loss=loss,
1299
+ logits=reshaped_logits,
1300
+ hidden_states=transformer_outputs.hidden_states,
1301
+ attentions=transformer_outputs.attentions,
1302
+ )
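# --- Editor's note: illustrative sketch, not part of the diffed files. ---
# It shows the reshaping convention used by FlaubertForMultipleChoice above:
# (batch, num_choices, seq_len) inputs are flattened to (batch * num_choices, seq_len)
# for the encoder, and the per-choice scores are folded back to (batch, num_choices)
# before the cross-entropy over choices.
import torch
from torch.nn import CrossEntropyLoss

batch, num_choices, seq_len = 2, 4, 7
input_ids = torch.randint(0, 100, (batch, num_choices, seq_len))

flat_input_ids = input_ids.view(-1, input_ids.size(-1))    # (8, 7) fed to the transformer
per_choice_score = torch.randn(flat_input_ids.size(0), 1)  # stand-in for sequence_summary + logits_proj
reshaped_logits = per_choice_score.view(-1, num_choices)   # (2, 4)

labels = torch.tensor([1, 3])                              # index of the correct choice
loss = CrossEntropyLoss()(reshaped_logits, labels)
print(reshaped_logits.shape, loss)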
venv/lib/python3.10/site-packages/transformers/models/flaubert/modeling_tf_flaubert.py ADDED
@@ -0,0 +1,1337 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ TF 2.0 Flaubert model.
17
+ """
18
+
19
+
20
+ from __future__ import annotations
21
+
22
+ import itertools
23
+ import random
24
+ import warnings
25
+ from dataclasses import dataclass
26
+ from typing import Dict, Optional, Tuple, Union
27
+
28
+ import numpy as np
29
+ import tensorflow as tf
30
+
31
+ from ...activations_tf import get_tf_activation
32
+ from ...modeling_tf_outputs import (
33
+ TFBaseModelOutput,
34
+ TFMultipleChoiceModelOutput,
35
+ TFQuestionAnsweringModelOutput,
36
+ TFSequenceClassifierOutput,
37
+ TFTokenClassifierOutput,
38
+ )
39
+ from ...modeling_tf_utils import (
40
+ TFModelInputType,
41
+ TFMultipleChoiceLoss,
42
+ TFPreTrainedModel,
43
+ TFQuestionAnsweringLoss,
44
+ TFSequenceClassificationLoss,
45
+ TFSequenceSummary,
46
+ TFSharedEmbeddings,
47
+ TFTokenClassificationLoss,
48
+ get_initializer,
49
+ keras,
50
+ keras_serializable,
51
+ unpack_inputs,
52
+ )
53
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
54
+ from ...utils import (
55
+ MULTIPLE_CHOICE_DUMMY_INPUTS,
56
+ ModelOutput,
57
+ add_code_sample_docstrings,
58
+ add_start_docstrings,
59
+ add_start_docstrings_to_model_forward,
60
+ logging,
61
+ )
62
+ from .configuration_flaubert import FlaubertConfig
63
+
64
+
65
+ logger = logging.get_logger(__name__)
66
+
67
+ _CHECKPOINT_FOR_DOC = "flaubert/flaubert_base_cased"
68
+ _CONFIG_FOR_DOC = "FlaubertConfig"
69
+
70
+
71
+ from ..deprecated._archive_maps import TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
72
+
73
+
74
+ FLAUBERT_START_DOCSTRING = r"""
75
+
76
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
77
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
78
+ etc.)
79
+
80
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
81
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
82
+ behavior.
83
+
84
+ <Tip>
85
+
86
+ TensorFlow models and layers in `transformers` accept two formats as input:
87
+
88
+ - having all inputs as keyword arguments (like PyTorch models), or
89
+ - having all inputs as a list, tuple or dict in the first positional argument.
90
+
91
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
92
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
93
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
94
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
95
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
96
+ positional argument:
97
+
98
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
99
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
100
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
101
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
102
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
103
+
104
+ Note that when creating models and layers with
105
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
106
+ about any of this, as you can just pass inputs like you would to any other Python function!
107
+
108
+ </Tip>
109
+
110
+ Parameters:
111
+ config ([`FlaubertConfig`]): Model configuration class with all the parameters of the model.
112
+ Initializing with a config file does not load the weights associated with the model, only the
113
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
114
+ """
115
+
116
+ FLAUBERT_INPUTS_DOCSTRING = r"""
117
+ Args:
118
+ input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`):
119
+ Indices of input sequence tokens in the vocabulary.
120
+
121
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
122
+ [`PreTrainedTokenizer.encode`] for details.
123
+
124
+ [What are input IDs?](../glossary#input-ids)
125
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
126
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
127
+
128
+ - `1` for tokens that are **not masked**,
129
+ - `0` for tokens that are **masked**.
130
+
131
+ [What are attention masks?](../glossary#attention-mask)
132
+ langs (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
133
+ A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
134
+ language ids which can be obtained from the language names by using two conversion mappings provided in
135
+ the configuration of the model (only provided for multilingual models). More precisely, the *language name
136
+ to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
137
+ *language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
138
+
139
+ See usage examples detailed in the [multilingual documentation](../multilingual).
140
+ token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
141
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
142
+ 1]`:
143
+
144
+ - `0` corresponds to a *sentence A* token,
145
+ - `1` corresponds to a *sentence B* token.
146
+
147
+ [What are token type IDs?](../glossary#token-type-ids)
148
+ position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
149
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
150
+ config.max_position_embeddings - 1]`.
151
+
152
+ [What are position IDs?](../glossary#position-ids)
153
+ lengths (`tf.Tensor` or `Numpy array` of shape `(batch_size,)`, *optional*):
154
+ Length of each sentence that can be used to avoid performing attention on padding token indices. You can
155
+ also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
156
+ `[0, ..., input_ids.size(-1)]`:
157
+ cache (`Dict[str, tf.Tensor]`, *optional*):
158
+ Dictionary string to `tf.FloatTensor` that contains precomputed hidden states (key and values in the
159
+ attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential
160
+ decoding.
161
+
162
+ The dictionary object will be modified in-place during the forward pass to add newly computed
163
+ hidden-states.
164
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
165
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
166
+
167
+ - `1` indicates the head is **not masked**,
168
+ - `0` indicates the head is **masked**.
169
+
170
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
171
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
172
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
173
+ model's internal embedding lookup matrix.
174
+ output_attentions (`bool`, *optional*):
175
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
176
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
177
+ config will be used instead.
178
+ output_hidden_states (`bool`, *optional*):
179
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
180
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
181
+ used instead.
182
+ return_dict (`bool`, *optional*):
183
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
184
+ eager mode, in graph mode the value will always be set to True.
185
+ training (`bool`, *optional*, defaults to `False`):
186
+ Whether or not to use the model in training mode (some modules like dropout modules have different
187
+ behaviors between training and evaluation).
188
+ """
189
+
190
+
191
+ def get_masks(slen, lengths, causal, padding_mask=None):
192
+ """
193
+ Generate hidden states mask, and optionally an attention mask.
194
+ """
195
+ bs = shape_list(lengths)[0]
196
+ if padding_mask is not None:
197
+ mask = padding_mask
198
+ else:
199
+ # assert lengths.max().item() <= slen
200
+ alen = tf.range(slen, dtype=lengths.dtype)
201
+ mask = alen < tf.expand_dims(lengths, axis=1)
202
+
203
+ # attention mask is the same as mask, or lower-triangular attention (causal)
204
+ if causal:
205
+ attn_mask = tf.less_equal(
206
+ tf.tile(tf.reshape(alen, (1, 1, slen)), (bs, slen, 1)), tf.reshape(alen, (1, slen, 1))
207
+ )
208
+ else:
209
+ attn_mask = mask
210
+
211
+ # sanity check
212
+ # assert shape_list(mask) == [bs, slen]
213
+ tf.debugging.assert_equal(shape_list(mask), [bs, slen])
214
+ if causal:
215
+ tf.debugging.assert_equal(shape_list(attn_mask), [bs, slen, slen])
216
+
217
+ return mask, attn_mask
218
+
219
+
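# --- Editor's note: illustrative sketch, not part of the diffed file above. ---
# It evaluates the two masks built by get_masks() for toy lengths: the padding mask
# marks real tokens, and the causal mask is the lower-triangular attention pattern.
import tensorflow as tf

bs, slen = 2, 5
lengths = tf.constant([3, 5])                      # real token counts per sequence
alen = tf.range(slen, dtype=lengths.dtype)

mask = alen < tf.expand_dims(lengths, axis=1)      # (bs, slen): True on real tokens
attn_mask = tf.less_equal(
    tf.tile(tf.reshape(alen, (1, 1, slen)), (bs, slen, 1)),
    tf.reshape(alen, (1, slen, 1)),
)                                                  # (bs, slen, slen): lower-triangular

print(mask.numpy())
print(attn_mask.numpy()[0].astype(int))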
220
+ class TFFlaubertPreTrainedModel(TFPreTrainedModel):
221
+ """
222
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
223
+ models.
224
+ """
225
+
226
+ config_class = FlaubertConfig
227
+ base_model_prefix = "transformer"
228
+
229
+ @property
230
+ def dummy_inputs(self):
231
+ # Sometimes Flaubert has language embeddings so don't forget to build them as well if needed
232
+ inputs_list = tf.constant([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]], dtype=tf.int32)
233
+ attns_list = tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]], dtype=tf.int32)
234
+ if self.config.use_lang_emb and self.config.n_langs > 1:
235
+ return {
236
+ "input_ids": inputs_list,
237
+ "attention_mask": attns_list,
238
+ "langs": tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]], dtype=tf.int32),
239
+ }
240
+ else:
241
+ return {"input_ids": inputs_list, "attention_mask": attns_list}
242
+
243
+
244
+ @add_start_docstrings(
245
+ "The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top.",
246
+ FLAUBERT_START_DOCSTRING,
247
+ )
248
+ class TFFlaubertModel(TFFlaubertPreTrainedModel):
249
+ def __init__(self, config, *inputs, **kwargs):
250
+ super().__init__(config, *inputs, **kwargs)
251
+ self.transformer = TFFlaubertMainLayer(config, name="transformer")
252
+
253
+ @unpack_inputs
254
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING)
255
+ @add_code_sample_docstrings(
256
+ checkpoint=_CHECKPOINT_FOR_DOC,
257
+ output_type=TFBaseModelOutput,
258
+ config_class=_CONFIG_FOR_DOC,
259
+ )
260
+ def call(
261
+ self,
262
+ input_ids: np.ndarray | tf.Tensor | None = None,
263
+ attention_mask: np.ndarray | tf.Tensor | None = None,
264
+ langs: np.ndarray | tf.Tensor | None = None,
265
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
266
+ position_ids: np.ndarray | tf.Tensor | None = None,
267
+ lengths: np.ndarray | tf.Tensor | None = None,
268
+ cache: Optional[Dict[str, tf.Tensor]] = None,
269
+ head_mask: np.ndarray | tf.Tensor | None = None,
270
+ inputs_embeds: tf.Tensor | None = None,
271
+ output_attentions: Optional[bool] = None,
272
+ output_hidden_states: Optional[bool] = None,
273
+ return_dict: Optional[bool] = None,
274
+ training: Optional[bool] = False,
275
+ ) -> Union[Tuple, TFBaseModelOutput]:
276
+ outputs = self.transformer(
277
+ input_ids=input_ids,
278
+ attention_mask=attention_mask,
279
+ langs=langs,
280
+ token_type_ids=token_type_ids,
281
+ position_ids=position_ids,
282
+ lengths=lengths,
283
+ cache=cache,
284
+ head_mask=head_mask,
285
+ inputs_embeds=inputs_embeds,
286
+ output_attentions=output_attentions,
287
+ output_hidden_states=output_hidden_states,
288
+ return_dict=return_dict,
289
+ training=training,
290
+ )
291
+
292
+ return outputs
293
+
294
+ def build(self, input_shape=None):
295
+ if self.built:
296
+ return
297
+ self.built = True
298
+ if getattr(self, "transformer", None) is not None:
299
+ with tf.name_scope(self.transformer.name):
300
+ self.transformer.build(None)
301
+
302
+
303
+ # Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMMultiHeadAttention with XLM->Flaubert
304
+ class TFFlaubertMultiHeadAttention(keras.layers.Layer):
305
+ NEW_ID = itertools.count()
306
+
307
+ def __init__(self, n_heads, dim, config, **kwargs):
308
+ super().__init__(**kwargs)
309
+ self.layer_id = next(TFFlaubertMultiHeadAttention.NEW_ID)
310
+ self.dim = dim
311
+ self.n_heads = n_heads
312
+ self.output_attentions = config.output_attentions
313
+ assert self.dim % self.n_heads == 0
314
+
315
+ self.q_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="q_lin")
316
+ self.k_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="k_lin")
317
+ self.v_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="v_lin")
318
+ self.out_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="out_lin")
319
+ self.dropout = keras.layers.Dropout(config.attention_dropout)
320
+ self.pruned_heads = set()
321
+ self.dim = dim
322
+
323
+ def prune_heads(self, heads):
324
+ raise NotImplementedError
325
+
326
+ def call(self, input, mask, kv, cache, head_mask, output_attentions, training=False):
327
+ """
328
+ Self-attention (if kv is None) or attention over source sentence (provided by kv).
329
+ """
330
+ # Input is (bs, qlen, dim)
331
+ # Mask is (bs, klen) (non-causal) or (bs, klen, klen)
332
+ bs, qlen, dim = shape_list(input)
333
+
334
+ if kv is None:
335
+ klen = qlen if cache is None else cache["slen"] + qlen
336
+ else:
337
+ klen = shape_list(kv)[1]
338
+
339
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
340
+ dim_per_head = self.dim // self.n_heads
341
+ mask_reshape = (bs, 1, qlen, klen) if len(shape_list(mask)) == 3 else (bs, 1, 1, klen)
342
+
343
+ def shape(x):
344
+ """projection"""
345
+ return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3))
346
+
347
+ def unshape(x):
348
+ """compute context"""
349
+ return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head))
350
+
351
+ q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head)
352
+
353
+ if kv is None:
354
+ k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head)
355
+ v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head)
356
+ elif cache is None or self.layer_id not in cache:
357
+ k = v = kv
358
+ k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head)
359
+ v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head)
360
+
361
+ if cache is not None:
362
+ if self.layer_id in cache:
363
+ if kv is None:
364
+ k_, v_ = cache[self.layer_id]
365
+ k = tf.concat([k_, k], axis=2) # (bs, n_heads, klen, dim_per_head)
366
+ v = tf.concat([v_, v], axis=2) # (bs, n_heads, klen, dim_per_head)
367
+ else:
368
+ k, v = cache[self.layer_id]
369
+
370
+ cache[self.layer_id] = (k, v)
371
+
372
+ f_dim_per_head = tf.cast(dim_per_head, dtype=q.dtype)
373
+ q = tf.multiply(q, tf.math.rsqrt(f_dim_per_head)) # (bs, n_heads, qlen, dim_per_head)
374
+ k = tf.cast(k, dtype=q.dtype)
375
+ scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, qlen, klen)
376
+ mask = tf.reshape(mask, mask_reshape) # (bs, n_heads, qlen, klen)
377
+ # scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, qlen, klen)
378
+ mask = tf.cast(mask, dtype=scores.dtype)
379
+ scores = scores - 1e30 * (1.0 - mask)
380
+ weights = stable_softmax(scores, axis=-1) # (bs, n_heads, qlen, klen)
381
+ weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen)
382
+
383
+ # Mask heads if we want to
384
+ if head_mask is not None:
385
+ weights = weights * head_mask
386
+
387
+ context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
388
+ context = unshape(context) # (bs, qlen, dim)
389
+ outputs = (self.out_lin(context),)
390
+
391
+ if output_attentions:
392
+ outputs = outputs + (weights,)
393
+
394
+ return outputs
395
+
396
+ def build(self, input_shape=None):
397
+ if self.built:
398
+ return
399
+ self.built = True
400
+ if getattr(self, "q_lin", None) is not None:
401
+ with tf.name_scope(self.q_lin.name):
402
+ self.q_lin.build([None, None, self.dim])
403
+ if getattr(self, "k_lin", None) is not None:
404
+ with tf.name_scope(self.k_lin.name):
405
+ self.k_lin.build([None, None, self.dim])
406
+ if getattr(self, "v_lin", None) is not None:
407
+ with tf.name_scope(self.v_lin.name):
408
+ self.v_lin.build([None, None, self.dim])
409
+ if getattr(self, "out_lin", None) is not None:
410
+ with tf.name_scope(self.out_lin.name):
411
+ self.out_lin.build([None, None, self.dim])
412
+
413
+
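As an aside on the attention layer above: the `shape`/`unshape` helpers split the hidden dimension into heads and merge it back. A minimal, self-contained sketch of that round trip (all sizes are invented for illustration, not taken from the diff):

    import tensorflow as tf

    bs, qlen, n_heads, dim_per_head = 2, 5, 4, 8
    x = tf.random.normal((bs, qlen, n_heads * dim_per_head))                                  # (bs, qlen, dim)
    heads = tf.transpose(tf.reshape(x, (bs, -1, n_heads, dim_per_head)), perm=(0, 2, 1, 3))   # (bs, n_heads, qlen, dim_per_head)
    merged = tf.reshape(tf.transpose(heads, perm=(0, 2, 1, 3)), (bs, -1, n_heads * dim_per_head))
    assert merged.shape == x.shape                                                            # back to (bs, qlen, dim)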
414
+ # Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMTransformerFFN
415
+ class TFFlaubertTransformerFFN(keras.layers.Layer):
416
+ def __init__(self, in_dim, dim_hidden, out_dim, config, **kwargs):
417
+ super().__init__(**kwargs)
418
+
419
+ self.lin1 = keras.layers.Dense(dim_hidden, kernel_initializer=get_initializer(config.init_std), name="lin1")
420
+ self.lin2 = keras.layers.Dense(out_dim, kernel_initializer=get_initializer(config.init_std), name="lin2")
421
+ self.act = get_tf_activation("gelu") if config.gelu_activation else get_tf_activation("relu")
422
+ self.dropout = keras.layers.Dropout(config.dropout)
423
+ self.in_dim = in_dim
424
+ self.dim_hidden = dim_hidden
425
+
426
+ def call(self, input, training=False):
427
+ x = self.lin1(input)
428
+ x = self.act(x)
429
+ x = self.lin2(x)
430
+ x = self.dropout(x, training=training)
431
+
432
+ return x
433
+
434
+ def build(self, input_shape=None):
435
+ if self.built:
436
+ return
437
+ self.built = True
438
+ if getattr(self, "lin1", None) is not None:
439
+ with tf.name_scope(self.lin1.name):
440
+ self.lin1.build([None, None, self.in_dim])
441
+ if getattr(self, "lin2", None) is not None:
442
+ with tf.name_scope(self.lin2.name):
443
+ self.lin2.build([None, None, self.dim_hidden])
444
+
445
+
446
+ @keras_serializable
447
+ class TFFlaubertMainLayer(keras.layers.Layer):
448
+ config_class = FlaubertConfig
449
+
450
+ def __init__(self, config, **kwargs):
451
+ super().__init__(**kwargs)
452
+
453
+ self.config = config
454
+ self.n_heads = config.n_heads
455
+ self.n_langs = config.n_langs
456
+ self.dim = config.emb_dim
457
+ self.hidden_dim = self.dim * 4
458
+ self.n_words = config.n_words
459
+ self.pad_index = config.pad_index
460
+ self.causal = config.causal
461
+ self.n_layers = config.n_layers
462
+ self.use_lang_emb = config.use_lang_emb
463
+ self.layerdrop = getattr(config, "layerdrop", 0.0)
464
+ self.pre_norm = getattr(config, "pre_norm", False)
465
+ self.output_attentions = config.output_attentions
466
+ self.output_hidden_states = config.output_hidden_states
467
+ self.return_dict = config.use_return_dict
468
+ self.max_position_embeddings = config.max_position_embeddings
469
+ self.embed_init_std = config.embed_init_std
470
+ self.dropout = keras.layers.Dropout(config.dropout)
471
+ self.embeddings = TFSharedEmbeddings(
472
+ self.n_words, self.dim, initializer_range=config.embed_init_std, name="embeddings"
473
+ )
474
+ self.layer_norm_emb = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm_emb")
475
+ self.attentions = []
476
+ self.layer_norm1 = []
477
+ self.ffns = []
478
+ self.layer_norm2 = []
479
+
480
+ for i in range(self.n_layers):
481
+ self.attentions.append(
482
+ TFFlaubertMultiHeadAttention(self.n_heads, self.dim, config=config, name=f"attentions_._{i}")
483
+ )
484
+ self.layer_norm1.append(
485
+ keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=f"layer_norm1_._{i}")
486
+ )
487
+ # if self.is_decoder:
488
+ # self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
489
+ # self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))
490
+ self.ffns.append(
491
+ TFFlaubertTransformerFFN(self.dim, self.hidden_dim, self.dim, config=config, name=f"ffns_._{i}")
492
+ )
493
+ self.layer_norm2.append(
494
+ keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=f"layer_norm2_._{i}")
495
+ )
496
+
497
+ def build(self, input_shape=None):
498
+ with tf.name_scope("position_embeddings"):
499
+ self.position_embeddings = self.add_weight(
500
+ name="embeddings",
501
+ shape=[self.max_position_embeddings, self.dim],
502
+ initializer=get_initializer(self.embed_init_std),
503
+ )
504
+
505
+ if self.n_langs > 1 and self.use_lang_emb:
506
+ with tf.name_scope("lang_embeddings"):
507
+ self.lang_embeddings = self.add_weight(
508
+ name="embeddings",
509
+ shape=[self.n_langs, self.dim],
510
+ initializer=get_initializer(self.embed_init_std),
511
+ )
512
+
513
+ if self.built:
514
+ return
515
+ self.built = True
516
+ if getattr(self, "embeddings", None) is not None:
517
+ with tf.name_scope(self.embeddings.name):
518
+ self.embeddings.build(None)
519
+ if getattr(self, "layer_norm_emb", None) is not None:
520
+ with tf.name_scope(self.layer_norm_emb.name):
521
+ self.layer_norm_emb.build([None, None, self.dim])
522
+ for layer in self.attentions:
523
+ with tf.name_scope(layer.name):
524
+ layer.build(None)
525
+ for layer in self.layer_norm1:
526
+ with tf.name_scope(layer.name):
527
+ layer.build([None, None, self.dim])
528
+ for layer in self.ffns:
529
+ with tf.name_scope(layer.name):
530
+ layer.build(None)
531
+ for layer in self.layer_norm2:
532
+ with tf.name_scope(layer.name):
533
+ layer.build([None, None, self.dim])
534
+
535
+ def get_input_embeddings(self):
536
+ return self.embeddings
537
+
538
+ def set_input_embeddings(self, value):
539
+ self.embeddings.weight = value
540
+ self.embeddings.vocab_size = shape_list(value)[0]
541
+
542
+ @unpack_inputs
543
+ def call(
544
+ self,
545
+ input_ids: np.ndarray | tf.Tensor | None = None,
546
+ attention_mask: np.ndarray | tf.Tensor | None = None,
547
+ langs: np.ndarray | tf.Tensor | None = None,
548
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
549
+ position_ids: np.ndarray | tf.Tensor | None = None,
550
+ lengths: np.ndarray | tf.Tensor | None = None,
551
+ cache: Optional[Dict[str, tf.Tensor]] = None,
552
+ head_mask: np.ndarray | tf.Tensor | None = None,
553
+ inputs_embeds: tf.Tensor | None = None,
554
+ output_attentions: Optional[bool] = None,
555
+ output_hidden_states: Optional[bool] = None,
556
+ return_dict: Optional[bool] = None,
557
+ training: Optional[bool] = False,
558
+ ) -> Union[Tuple, TFBaseModelOutput]:
559
+ # removed: src_enc=None, src_len=None
560
+
561
+ if input_ids is not None and inputs_embeds is not None:
562
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
563
+ elif input_ids is not None:
564
+ bs, slen = shape_list(input_ids)
565
+ elif inputs_embeds is not None:
566
+ bs, slen = shape_list(inputs_embeds)[:2]
567
+ else:
568
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
569
+
570
+ if lengths is None:
571
+ if input_ids is not None:
572
+ lengths = tf.reduce_sum(
573
+ tf.cast(tf.not_equal(input_ids, self.pad_index), dtype=input_ids.dtype), axis=1
574
+ )
575
+ else:
576
+ lengths = tf.convert_to_tensor([slen] * bs)
577
+ # mask = input_ids != self.pad_index
578
+
579
+ # check inputs
580
+ # assert shape_list(lengths)[0] == bs
581
+ tf.debugging.assert_equal(
582
+ shape_list(lengths)[0], bs,
583
+ message=f"Expected batch size {shape_list(lengths)[0]} and received batch size {bs} mismatched",
584
+ )
585
+ # assert lengths.max().item() <= slen
586
+ # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0
587
+ # assert (src_enc is None) == (src_len is None)
588
+ # if src_enc is not None:
589
+ # assert self.is_decoder
590
+ # assert src_enc.size(0) == bs
591
+
592
+ # generate masks
593
+ mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)
594
+ # if self.is_decoder and src_enc is not None:
595
+ # src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]
596
+
597
+ # position_ids
598
+ if position_ids is None:
599
+ position_ids = tf.expand_dims(tf.range(slen), axis=0)
600
+ position_ids = tf.tile(position_ids, (bs, 1))
601
+
602
+ # assert shape_list(position_ids) == [bs, slen] # (slen, bs)
603
+ tf.debugging.assert_equal(
604
+ shape_list(position_ids), [bs, slen],
605
+ message=f"Position id shape {shape_list(position_ids)} and input shape {[bs, slen]} mismatched",
606
+ )
607
+ # position_ids = position_ids.transpose(0, 1)
608
+
609
+ # langs
610
+ if langs is not None:
611
+ # assert shape_list(langs) == [bs, slen] # (slen, bs)
612
+ tf.debugging.assert_equal(
613
+ shape_list(langs), [bs, slen],
614
+ message=f"Lang shape {shape_list(langs)} and input shape {[bs, slen]} mismatched",
615
+ )
616
+ # langs = langs.transpose(0, 1)
617
+
618
+ # Prepare head mask if needed
619
+ # 1.0 in head_mask indicate we keep the head
620
+ # attention_probs has shape bsz x n_heads x N x N
621
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
622
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x qlen x klen]
623
+ if head_mask is not None:
624
+ raise NotImplementedError
625
+ else:
626
+ head_mask = [None] * self.n_layers
627
+
628
+ # do not recompute cached elements
629
+ if cache is not None and input_ids is not None:
630
+ _slen = slen - cache["slen"]
631
+ input_ids = input_ids[:, -_slen:]
632
+ position_ids = position_ids[:, -_slen:]
633
+ if langs is not None:
634
+ langs = langs[:, -_slen:]
635
+ mask = mask[:, -_slen:]
636
+ attn_mask = attn_mask[:, -_slen:]
637
+
638
+ # embeddings
639
+ if inputs_embeds is None:
640
+ check_embeddings_within_bounds(input_ids, self.embeddings.vocab_size)
641
+ inputs_embeds = self.embeddings(input_ids)
642
+
643
+ tensor = inputs_embeds + tf.gather(self.position_embeddings, position_ids)
644
+
645
+ if langs is not None and self.use_lang_emb:
646
+ tensor = tensor + tf.gather(self.lang_embeddings, langs)
647
+ if token_type_ids is not None:
648
+ tensor = tensor + self.embeddings(token_type_ids)
649
+
650
+ tensor = self.layer_norm_emb(tensor)
651
+ tensor = self.dropout(tensor, training=training)
652
+ mask = tf.cast(mask, dtype=tensor.dtype)
653
+ tensor = tensor * tf.expand_dims(mask, axis=-1)
654
+
655
+ # hidden_states and attentions cannot be None in graph mode.
656
+ hidden_states = () if output_hidden_states else None
657
+ attentions = () if output_attentions else None
658
+
659
+ # transformer layers
660
+ for i in range(self.n_layers):
661
+ # LayerDrop
662
+ dropout_probability = random.uniform(0, 1)
663
+
664
+ if training and (dropout_probability < self.layerdrop):
665
+ continue
666
+
667
+ if output_hidden_states:
668
+ hidden_states = hidden_states + (tensor,)
669
+
670
+ # self attention
671
+ if not self.pre_norm:
672
+ attn_outputs = self.attentions[i](
673
+ tensor,
674
+ attn_mask,
675
+ None,
676
+ cache,
677
+ head_mask[i],
678
+ output_attentions,
679
+ training=training,
680
+ )
681
+ attn = attn_outputs[0]
682
+
683
+ if output_attentions:
684
+ attentions = attentions + (attn_outputs[1],)
685
+
686
+ attn = self.dropout(attn, training=training)
687
+ tensor = tensor + attn
688
+ tensor = self.layer_norm1[i](tensor)
689
+ else:
690
+ tensor_normalized = self.layer_norm1[i](tensor)
691
+ attn_outputs = self.attentions[i](
692
+ tensor_normalized,
693
+ attn_mask,
694
+ None,
695
+ cache,
696
+ head_mask[i],
697
+ output_attentions,
698
+ training=training,
699
+ )
700
+ attn = attn_outputs[0]
701
+
702
+ if output_attentions:
703
+ attentions = attentions + (attn_outputs[1],)
704
+
705
+ attn = self.dropout(attn, training=training)
706
+ tensor = tensor + attn
707
+
708
+ # encoder attention (for decoder only)
709
+ # if self.is_decoder and src_enc is not None:
710
+ # attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)
711
+ # attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
712
+ # tensor = tensor + attn
713
+ # tensor = self.layer_norm15[i](tensor)
714
+
715
+ # FFN
716
+ if not self.pre_norm:
717
+ tensor = tensor + self.ffns[i](tensor)
718
+ tensor = self.layer_norm2[i](tensor)
719
+ else:
720
+ tensor_normalized = self.layer_norm2[i](tensor)
721
+ tensor = tensor + self.ffns[i](tensor_normalized)
722
+
723
+ tensor = tensor * tf.expand_dims(mask, axis=-1)
724
+
725
+ # Add last hidden state
726
+ if output_hidden_states:
727
+ hidden_states = hidden_states + (tensor,)
728
+
729
+ # update cache length
730
+ if cache is not None:
731
+ cache["slen"] += tensor.size(1)
732
+
733
+ # move back sequence length to dimension 0
734
+ # tensor = tensor.transpose(0, 1)
735
+
736
+ if not return_dict:
737
+ return tuple(v for v in [tensor, hidden_states, attentions] if v is not None)
738
+
739
+ return TFBaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions)
740
+
741
+
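Note on the main layer above: when `lengths` is not passed, it is derived by counting non-padding tokens, and `get_masks` turns it into the `(bs, slen)` padding mask used to zero out padded positions. A rough, hedged sketch of that derivation (the `pad_index` value here is invented purely for illustration):

    import tensorflow as tf

    pad_index = 2  # illustrative padding id, not necessarily the real config value
    input_ids = tf.constant([[5, 6, 7, 2, 2], [8, 9, 2, 2, 2]])
    lengths = tf.reduce_sum(tf.cast(tf.not_equal(input_ids, pad_index), tf.int32), axis=1)  # [3, 2]
    mask = tf.sequence_mask(lengths, maxlen=tf.shape(input_ids)[1])  # boolean (bs, slen) padding mask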
742
+ # Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMPredLayer
743
+ class TFFlaubertPredLayer(keras.layers.Layer):
744
+ """
745
+ Prediction layer (cross_entropy or adaptive_softmax).
746
+ """
747
+
748
+ def __init__(self, config, input_embeddings, **kwargs):
749
+ super().__init__(**kwargs)
750
+
751
+ self.asm = config.asm
752
+ self.n_words = config.n_words
753
+ self.pad_index = config.pad_index
754
+
755
+ if config.asm is False:
756
+ self.input_embeddings = input_embeddings
757
+ else:
758
+ raise NotImplementedError
759
+ # self.proj = nn.AdaptiveLogSoftmaxWithLoss(
760
+ # in_features=dim,
761
+ # n_classes=config.n_words,
762
+ # cutoffs=config.asm_cutoffs,
763
+ # div_value=config.asm_div_value,
764
+ # head_bias=True, # default is False
765
+ # )
766
+
767
+ def build(self, input_shape):
768
+ # The output weights are the same as the input embeddings, but there is an output-only bias for each token.
769
+ self.bias = self.add_weight(shape=(self.n_words,), initializer="zeros", trainable=True, name="bias")
770
+
771
+ super().build(input_shape)
772
+
773
+ def get_output_embeddings(self):
774
+ return self.input_embeddings
775
+
776
+ def set_output_embeddings(self, value):
777
+ self.input_embeddings.weight = value
778
+ self.input_embeddings.vocab_size = shape_list(value)[0]
779
+
780
+ def get_bias(self):
781
+ return {"bias": self.bias}
782
+
783
+ def set_bias(self, value):
784
+ self.bias = value["bias"]
785
+ self.vocab_size = shape_list(value["bias"])[0]
786
+
787
+ def call(self, hidden_states):
788
+ hidden_states = self.input_embeddings(hidden_states, mode="linear")
789
+ hidden_states = hidden_states + self.bias
790
+
791
+ return hidden_states
792
+
793
+
794
+ @dataclass
795
+ class TFFlaubertWithLMHeadModelOutput(ModelOutput):
796
+ """
797
+ Base class for [`TFFlaubertWithLMHeadModel`] outputs.
798
+
799
+ Args:
800
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
801
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
802
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
803
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
804
+ `(batch_size, sequence_length, hidden_size)`.
805
+
806
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
807
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
808
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
809
+ sequence_length)`.
810
+
811
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
812
+ heads.
813
+ """
814
+
815
+ logits: tf.Tensor = None
816
+ hidden_states: Tuple[tf.Tensor] | None = None
817
+ attentions: Tuple[tf.Tensor] | None = None
818
+
819
+
820
+ @add_start_docstrings(
821
+ """
822
+ The Flaubert Model transformer with a language modeling head on top (linear layer with weights tied to the input
823
+ embeddings).
824
+ """,
825
+ FLAUBERT_START_DOCSTRING,
826
+ )
827
+ class TFFlaubertWithLMHeadModel(TFFlaubertPreTrainedModel):
828
+ def __init__(self, config, *inputs, **kwargs):
829
+ super().__init__(config, *inputs, **kwargs)
830
+ self.transformer = TFFlaubertMainLayer(config, name="transformer")
831
+ self.pred_layer = TFFlaubertPredLayer(config, self.transformer.embeddings, name="pred_layer_._proj")
832
+ # Flaubert does not have past caching features
833
+ self.supports_xla_generation = False
834
+
835
+ def get_lm_head(self):
836
+ return self.pred_layer
837
+
838
+ def get_prefix_bias_name(self):
839
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
840
+ return self.name + "/" + self.pred_layer.name
841
+
842
+ def prepare_inputs_for_generation(self, inputs, **kwargs):
843
+ mask_token_id = self.config.mask_token_id
844
+ lang_id = self.config.lang_id
845
+
846
+ effective_batch_size = inputs.shape[0]
847
+ mask_token = tf.fill((effective_batch_size, 1), 1) * mask_token_id
848
+ inputs = tf.concat([inputs, mask_token], axis=1)
849
+
850
+ if lang_id is not None:
851
+ langs = tf.ones_like(inputs) * lang_id
852
+ else:
853
+ langs = None
854
+ return {"input_ids": inputs, "langs": langs}
855
+
856
+ @unpack_inputs
857
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING)
858
+ @add_code_sample_docstrings(
859
+ checkpoint=_CHECKPOINT_FOR_DOC,
860
+ output_type=TFFlaubertWithLMHeadModelOutput,
861
+ config_class=_CONFIG_FOR_DOC,
862
+ )
863
+ def call(
864
+ self,
865
+ input_ids: np.ndarray | tf.Tensor | None = None,
866
+ attention_mask: np.ndarray | tf.Tensor | None = None,
867
+ langs: np.ndarray | tf.Tensor | None = None,
868
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
869
+ position_ids: np.ndarray | tf.Tensor | None = None,
870
+ lengths: np.ndarray | tf.Tensor | None = None,
871
+ cache: Optional[Dict[str, tf.Tensor]] = None,
872
+ head_mask: np.ndarray | tf.Tensor | None = None,
873
+ inputs_embeds: tf.Tensor | None = None,
874
+ output_attentions: Optional[bool] = None,
875
+ output_hidden_states: Optional[bool] = None,
876
+ return_dict: Optional[bool] = None,
877
+ training: Optional[bool] = False,
878
+ ) -> Union[Tuple, TFFlaubertWithLMHeadModelOutput]:
879
+ transformer_outputs = self.transformer(
880
+ input_ids=input_ids,
881
+ attention_mask=attention_mask,
882
+ langs=langs,
883
+ token_type_ids=token_type_ids,
884
+ position_ids=position_ids,
885
+ lengths=lengths,
886
+ cache=cache,
887
+ head_mask=head_mask,
888
+ inputs_embeds=inputs_embeds,
889
+ output_attentions=output_attentions,
890
+ output_hidden_states=output_hidden_states,
891
+ return_dict=return_dict,
892
+ training=training,
893
+ )
894
+ output = transformer_outputs[0]
895
+ outputs = self.pred_layer(output)
896
+
897
+ if not return_dict:
898
+ return (outputs,) + transformer_outputs[1:]
899
+
900
+ return TFFlaubertWithLMHeadModelOutput(
901
+ logits=outputs, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions
902
+ )
903
+
904
+ def build(self, input_shape=None):
905
+ if self.built:
906
+ return
907
+ self.built = True
908
+ if getattr(self, "transformer", None) is not None:
909
+ with tf.name_scope(self.transformer.name):
910
+ self.transformer.build(None)
911
+ if getattr(self, "pred_layer", None) is not None:
912
+ with tf.name_scope(self.pred_layer.name):
913
+ self.pred_layer.build(None)
914
+
915
+
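A short usage sketch for the LM-head model above (hedged: the checkpoint name `flaubert/flaubert_base_cased` and the French example sentence are only illustrative; `<special1>` is the mask token defined by the tokenizer later in this diff):

    from transformers import FlaubertTokenizer, TFFlaubertWithLMHeadModel

    tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
    model = TFFlaubertWithLMHeadModel.from_pretrained("flaubert/flaubert_base_cased")
    inputs = tokenizer("Le camembert est <special1> !", return_tensors="tf")
    logits = model(**inputs).logits  # (batch_size, sequence_length, vocab_size)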
916
+ @add_start_docstrings(
917
+ """
918
+ Flaubert Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)
919
+ e.g. for GLUE tasks.
920
+ """,
921
+ FLAUBERT_START_DOCSTRING,
922
+ )
923
+ # Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMForSequenceClassification with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
924
+ class TFFlaubertForSequenceClassification(TFFlaubertPreTrainedModel, TFSequenceClassificationLoss):
925
+ def __init__(self, config, *inputs, **kwargs):
926
+ super().__init__(config, *inputs, **kwargs)
927
+ self.num_labels = config.num_labels
928
+
929
+ self.transformer = TFFlaubertMainLayer(config, name="transformer")
930
+ self.sequence_summary = TFSequenceSummary(config, initializer_range=config.init_std, name="sequence_summary")
931
+
932
+ @unpack_inputs
933
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
934
+ @add_code_sample_docstrings(
935
+ checkpoint=_CHECKPOINT_FOR_DOC,
936
+ output_type=TFSequenceClassifierOutput,
937
+ config_class=_CONFIG_FOR_DOC,
938
+ )
939
+ def call(
940
+ self,
941
+ input_ids: TFModelInputType | None = None,
942
+ attention_mask: np.ndarray | tf.Tensor | None = None,
943
+ langs: np.ndarray | tf.Tensor | None = None,
944
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
945
+ position_ids: np.ndarray | tf.Tensor | None = None,
946
+ lengths: np.ndarray | tf.Tensor | None = None,
947
+ cache: Optional[Dict[str, tf.Tensor]] = None,
948
+ head_mask: np.ndarray | tf.Tensor | None = None,
949
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
950
+ output_attentions: Optional[bool] = None,
951
+ output_hidden_states: Optional[bool] = None,
952
+ return_dict: Optional[bool] = None,
953
+ labels: np.ndarray | tf.Tensor | None = None,
954
+ training: bool = False,
955
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
956
+ r"""
957
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
958
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
959
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
960
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
961
+ """
962
+ transformer_outputs = self.transformer(
963
+ input_ids=input_ids,
964
+ attention_mask=attention_mask,
965
+ langs=langs,
966
+ token_type_ids=token_type_ids,
967
+ position_ids=position_ids,
968
+ lengths=lengths,
969
+ cache=cache,
970
+ head_mask=head_mask,
971
+ inputs_embeds=inputs_embeds,
972
+ output_attentions=output_attentions,
973
+ output_hidden_states=output_hidden_states,
974
+ return_dict=return_dict,
975
+ training=training,
976
+ )
977
+ output = transformer_outputs[0]
978
+
979
+ logits = self.sequence_summary(output)
980
+
981
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
982
+
983
+ if not return_dict:
984
+ output = (logits,) + transformer_outputs[1:]
985
+ return ((loss,) + output) if loss is not None else output
986
+
987
+ return TFSequenceClassifierOutput(
988
+ loss=loss,
989
+ logits=logits,
990
+ hidden_states=transformer_outputs.hidden_states,
991
+ attentions=transformer_outputs.attentions,
992
+ )
993
+
994
+ def build(self, input_shape=None):
995
+ if self.built:
996
+ return
997
+ self.built = True
998
+ if getattr(self, "transformer", None) is not None:
999
+ with tf.name_scope(self.transformer.name):
1000
+ self.transformer.build(None)
1001
+ if getattr(self, "sequence_summary", None) is not None:
1002
+ with tf.name_scope(self.sequence_summary.name):
1003
+ self.sequence_summary.build(None)
1004
+
1005
+
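For the sequence classification head above, a brief hedged sketch of how labels produce a loss (the checkpoint name, label value, and example sentence are illustrative only):

    import tensorflow as tf
    from transformers import FlaubertTokenizer, TFFlaubertForSequenceClassification

    tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
    model = TFFlaubertForSequenceClassification.from_pretrained("flaubert/flaubert_base_cased", num_labels=2)
    inputs = tokenizer("Un film remarquable.", return_tensors="tf")
    outputs = model(**inputs, labels=tf.constant([1]))
    print(outputs.loss, outputs.logits)  # scalar loss and (batch_size, num_labels) logits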
1006
+ @add_start_docstrings(
1007
+ """
1008
+ Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1009
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1010
+ """,
1011
+ FLAUBERT_START_DOCSTRING,
1012
+ )
1013
+ # Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMForQuestionAnsweringSimple with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
1014
+ class TFFlaubertForQuestionAnsweringSimple(TFFlaubertPreTrainedModel, TFQuestionAnsweringLoss):
1015
+ def __init__(self, config, *inputs, **kwargs):
1016
+ super().__init__(config, *inputs, **kwargs)
1017
+ self.transformer = TFFlaubertMainLayer(config, name="transformer")
1018
+ self.qa_outputs = keras.layers.Dense(
1019
+ config.num_labels, kernel_initializer=get_initializer(config.init_std), name="qa_outputs"
1020
+ )
1021
+ self.config = config
1022
+
1023
+ @unpack_inputs
1024
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1025
+ @add_code_sample_docstrings(
1026
+ checkpoint=_CHECKPOINT_FOR_DOC,
1027
+ output_type=TFQuestionAnsweringModelOutput,
1028
+ config_class=_CONFIG_FOR_DOC,
1029
+ )
1030
+ def call(
1031
+ self,
1032
+ input_ids: TFModelInputType | None = None,
1033
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1034
+ langs: np.ndarray | tf.Tensor | None = None,
1035
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1036
+ position_ids: np.ndarray | tf.Tensor | None = None,
1037
+ lengths: np.ndarray | tf.Tensor | None = None,
1038
+ cache: Optional[Dict[str, tf.Tensor]] = None,
1039
+ head_mask: np.ndarray | tf.Tensor | None = None,
1040
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1041
+ output_attentions: Optional[bool] = None,
1042
+ output_hidden_states: Optional[bool] = None,
1043
+ return_dict: Optional[bool] = None,
1044
+ start_positions: np.ndarray | tf.Tensor | None = None,
1045
+ end_positions: np.ndarray | tf.Tensor | None = None,
1046
+ training: bool = False,
1047
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1048
+ r"""
1049
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1050
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1051
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1052
+ are not taken into account for computing the loss.
1053
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1054
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1055
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1056
+ are not taken into account for computing the loss.
1057
+ """
1058
+ transformer_outputs = self.transformer(
1059
+ input_ids=input_ids,
1060
+ attention_mask=attention_mask,
1061
+ langs=langs,
1062
+ token_type_ids=token_type_ids,
1063
+ position_ids=position_ids,
1064
+ lengths=lengths,
1065
+ cache=cache,
1066
+ head_mask=head_mask,
1067
+ inputs_embeds=inputs_embeds,
1068
+ output_attentions=output_attentions,
1069
+ output_hidden_states=output_hidden_states,
1070
+ return_dict=return_dict,
1071
+ training=training,
1072
+ )
1073
+ sequence_output = transformer_outputs[0]
1074
+
1075
+ logits = self.qa_outputs(sequence_output)
1076
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
1077
+ start_logits = tf.squeeze(start_logits, axis=-1)
1078
+ end_logits = tf.squeeze(end_logits, axis=-1)
1079
+
1080
+ loss = None
1081
+ if start_positions is not None and end_positions is not None:
1082
+ labels = {"start_position": start_positions}
1083
+ labels["end_position"] = end_positions
1084
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
1085
+
1086
+ if not return_dict:
1087
+ output = (start_logits, end_logits) + transformer_outputs[1:]
1088
+ return ((loss,) + output) if loss is not None else output
1089
+
1090
+ return TFQuestionAnsweringModelOutput(
1091
+ loss=loss,
1092
+ start_logits=start_logits,
1093
+ end_logits=end_logits,
1094
+ hidden_states=transformer_outputs.hidden_states,
1095
+ attentions=transformer_outputs.attentions,
1096
+ )
1097
+
1098
+ def build(self, input_shape=None):
1099
+ if self.built:
1100
+ return
1101
+ self.built = True
1102
+ if getattr(self, "transformer", None) is not None:
1103
+ with tf.name_scope(self.transformer.name):
1104
+ self.transformer.build(None)
1105
+ if getattr(self, "qa_outputs", None) is not None:
1106
+ with tf.name_scope(self.qa_outputs.name):
1107
+ self.qa_outputs.build([None, None, self.config.hidden_size])
1108
+
1109
+
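For the question-answering head above, the start/end logits are typically turned into an answer span by taking the argmax of each. A hedged sketch (the checkpoint name is illustrative, and its QA head would be randomly initialized unless fine-tuned):

    import tensorflow as tf
    from transformers import FlaubertTokenizer, TFFlaubertForQuestionAnsweringSimple

    tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
    model = TFFlaubertForQuestionAnsweringSimple.from_pretrained("flaubert/flaubert_base_cased")
    inputs = tokenizer("Qui est Victor Hugo ?", "Victor Hugo est un écrivain français.", return_tensors="tf")
    outputs = model(**inputs)
    start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
    end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
    answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])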
1110
+ @add_start_docstrings(
1111
+ """
1112
+ Flaubert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1113
+ Named-Entity-Recognition (NER) tasks.
1114
+ """,
1115
+ FLAUBERT_START_DOCSTRING,
1116
+ )
1117
+ # Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMForTokenClassification with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
1118
+ class TFFlaubertForTokenClassification(TFFlaubertPreTrainedModel, TFTokenClassificationLoss):
1119
+ def __init__(self, config, *inputs, **kwargs):
1120
+ super().__init__(config, *inputs, **kwargs)
1121
+ self.num_labels = config.num_labels
1122
+
1123
+ self.transformer = TFFlaubertMainLayer(config, name="transformer")
1124
+ self.dropout = keras.layers.Dropout(config.dropout)
1125
+ self.classifier = keras.layers.Dense(
1126
+ config.num_labels, kernel_initializer=get_initializer(config.init_std), name="classifier"
1127
+ )
1128
+ self.config = config
1129
+
1130
+ @unpack_inputs
1131
+ @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1132
+ @add_code_sample_docstrings(
1133
+ checkpoint=_CHECKPOINT_FOR_DOC,
1134
+ output_type=TFTokenClassifierOutput,
1135
+ config_class=_CONFIG_FOR_DOC,
1136
+ )
1137
+ def call(
1138
+ self,
1139
+ input_ids: TFModelInputType | None = None,
1140
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1141
+ langs: np.ndarray | tf.Tensor | None = None,
1142
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1143
+ position_ids: np.ndarray | tf.Tensor | None = None,
1144
+ lengths: np.ndarray | tf.Tensor | None = None,
1145
+ cache: Optional[Dict[str, tf.Tensor]] = None,
1146
+ head_mask: np.ndarray | tf.Tensor | None = None,
1147
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1148
+ output_attentions: Optional[bool] = None,
1149
+ output_hidden_states: Optional[bool] = None,
1150
+ return_dict: Optional[bool] = None,
1151
+ labels: np.ndarray | tf.Tensor | None = None,
1152
+ training: bool = False,
1153
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1154
+ r"""
1155
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1156
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1157
+ """
1158
+ transformer_outputs = self.transformer(
1159
+ input_ids=input_ids,
1160
+ attention_mask=attention_mask,
1161
+ langs=langs,
1162
+ token_type_ids=token_type_ids,
1163
+ position_ids=position_ids,
1164
+ lengths=lengths,
1165
+ cache=cache,
1166
+ head_mask=head_mask,
1167
+ inputs_embeds=inputs_embeds,
1168
+ output_attentions=output_attentions,
1169
+ output_hidden_states=output_hidden_states,
1170
+ return_dict=return_dict,
1171
+ training=training,
1172
+ )
1173
+ sequence_output = transformer_outputs[0]
1174
+
1175
+ sequence_output = self.dropout(sequence_output, training=training)
1176
+ logits = self.classifier(sequence_output)
1177
+
1178
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1179
+
1180
+ if not return_dict:
1181
+ output = (logits,) + transformer_outputs[1:]
1182
+ return ((loss,) + output) if loss is not None else output
1183
+
1184
+ return TFTokenClassifierOutput(
1185
+ loss=loss,
1186
+ logits=logits,
1187
+ hidden_states=transformer_outputs.hidden_states,
1188
+ attentions=transformer_outputs.attentions,
1189
+ )
1190
+
1191
+ def build(self, input_shape=None):
1192
+ if self.built:
1193
+ return
1194
+ self.built = True
1195
+ if getattr(self, "transformer", None) is not None:
1196
+ with tf.name_scope(self.transformer.name):
1197
+ self.transformer.build(None)
1198
+ if getattr(self, "classifier", None) is not None:
1199
+ with tf.name_scope(self.classifier.name):
1200
+ self.classifier.build([None, None, self.config.hidden_size])
1201
+
1202
+
1203
+ @add_start_docstrings(
1204
+ """
1205
+ Flaubert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1206
+ softmax) e.g. for RocStories/SWAG tasks.
1207
+ """,
1208
+ FLAUBERT_START_DOCSTRING,
1209
+ )
1210
+ # Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMForMultipleChoice with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
1211
+ class TFFlaubertForMultipleChoice(TFFlaubertPreTrainedModel, TFMultipleChoiceLoss):
1212
+ def __init__(self, config, *inputs, **kwargs):
1213
+ super().__init__(config, *inputs, **kwargs)
1214
+
1215
+ self.transformer = TFFlaubertMainLayer(config, name="transformer")
1216
+ self.sequence_summary = TFSequenceSummary(config, initializer_range=config.init_std, name="sequence_summary")
1217
+ self.logits_proj = keras.layers.Dense(
1218
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="logits_proj"
1219
+ )
1220
+ self.config = config
1221
+
1222
+ @property
1223
+ def dummy_inputs(self):
1224
+ """
1225
+ Dummy inputs to build the network.
1226
+
1227
+ Returns:
1228
+ tf.Tensor with dummy inputs
1229
+ """
1230
+ # Sometimes Flaubert has language embeddings so don't forget to build them as well if needed
1231
+ if self.config.use_lang_emb and self.config.n_langs > 1:
1232
+ return {
1233
+ "input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32),
1234
+ "langs": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32),
1235
+ }
1236
+ else:
1237
+ return {
1238
+ "input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32),
1239
+ }
1240
+
1241
+ @unpack_inputs
1242
+ @add_start_docstrings_to_model_forward(
1243
+ FLAUBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1244
+ )
1245
+ @add_code_sample_docstrings(
1246
+ checkpoint=_CHECKPOINT_FOR_DOC,
1247
+ output_type=TFMultipleChoiceModelOutput,
1248
+ config_class=_CONFIG_FOR_DOC,
1249
+ )
1250
+ def call(
1251
+ self,
1252
+ input_ids: TFModelInputType | None = None,
1253
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1254
+ langs: np.ndarray | tf.Tensor | None = None,
1255
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1256
+ position_ids: np.ndarray | tf.Tensor | None = None,
1257
+ lengths: np.ndarray | tf.Tensor | None = None,
1258
+ cache: Optional[Dict[str, tf.Tensor]] = None,
1259
+ head_mask: np.ndarray | tf.Tensor | None = None,
1260
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1261
+ output_attentions: Optional[bool] = None,
1262
+ output_hidden_states: Optional[bool] = None,
1263
+ return_dict: Optional[bool] = None,
1264
+ labels: np.ndarray | tf.Tensor | None = None,
1265
+ training: bool = False,
1266
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
1267
+ if input_ids is not None:
1268
+ num_choices = shape_list(input_ids)[1]
1269
+ seq_length = shape_list(input_ids)[2]
1270
+ else:
1271
+ num_choices = shape_list(inputs_embeds)[1]
1272
+ seq_length = shape_list(inputs_embeds)[2]
1273
+
1274
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
1275
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
1276
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
1277
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
1278
+ flat_langs = tf.reshape(langs, (-1, seq_length)) if langs is not None else None
1279
+ flat_inputs_embeds = (
1280
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
1281
+ if inputs_embeds is not None
1282
+ else None
1283
+ )
1284
+
1285
+ if lengths is not None:
1286
+ logger.warning(
1287
+ "The `lengths` parameter cannot be used with the Flaubert multiple choice models. Please use the "
1288
+ "attention mask instead.",
1289
+ )
1290
+ lengths = None
1291
+
1292
+ transformer_outputs = self.transformer(
1293
+ flat_input_ids,
1294
+ flat_attention_mask,
1295
+ flat_langs,
1296
+ flat_token_type_ids,
1297
+ flat_position_ids,
1298
+ lengths,
1299
+ cache,
1300
+ head_mask,
1301
+ flat_inputs_embeds,
1302
+ output_attentions,
1303
+ output_hidden_states,
1304
+ return_dict=return_dict,
1305
+ training=training,
1306
+ )
1307
+ output = transformer_outputs[0]
1308
+ logits = self.sequence_summary(output)
1309
+ logits = self.logits_proj(logits)
1310
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
1311
+
1312
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
1313
+
1314
+ if not return_dict:
1315
+ output = (reshaped_logits,) + transformer_outputs[1:]
1316
+ return ((loss,) + output) if loss is not None else output
1317
+
1318
+ return TFMultipleChoiceModelOutput(
1319
+ loss=loss,
1320
+ logits=reshaped_logits,
1321
+ hidden_states=transformer_outputs.hidden_states,
1322
+ attentions=transformer_outputs.attentions,
1323
+ )
1324
+
1325
+ def build(self, input_shape=None):
1326
+ if self.built:
1327
+ return
1328
+ self.built = True
1329
+ if getattr(self, "transformer", None) is not None:
1330
+ with tf.name_scope(self.transformer.name):
1331
+ self.transformer.build(None)
1332
+ if getattr(self, "sequence_summary", None) is not None:
1333
+ with tf.name_scope(self.sequence_summary.name):
1334
+ self.sequence_summary.build(None)
1335
+ if getattr(self, "logits_proj", None) is not None:
1336
+ with tf.name_scope(self.logits_proj.name):
1337
+ self.logits_proj.build([None, None, self.config.num_labels])
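On the multiple-choice model above: inputs of shape `(batch_size, num_choices, seq_len)` are flattened to `(batch_size * num_choices, seq_len)` before going through the transformer, and the per-choice scores are reshaped back. A tiny hedged sketch of that reshape (all numbers are invented):

    import tensorflow as tf

    batch_size, num_choices, seq_len = 2, 4, 7
    input_ids = tf.zeros((batch_size, num_choices, seq_len), dtype=tf.int32)
    flat = tf.reshape(input_ids, (-1, seq_len))          # (batch_size * num_choices, seq_len)
    logits = tf.zeros((batch_size * num_choices, 1))     # one score per flattened choice
    reshaped = tf.reshape(logits, (-1, num_choices))     # (batch_size, num_choices)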
venv/lib/python3.10/site-packages/transformers/models/flaubert/tokenization_flaubert.py ADDED
@@ -0,0 +1,565 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present CNRS, Facebook Inc. and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for Flaubert."""
16
+
17
+
18
+ import json
19
+ import os
20
+ import re
21
+ import unicodedata
22
+ from typing import List, Optional, Tuple
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {
31
+ "vocab_file": "vocab.json",
32
+ "merges_file": "merges.txt",
33
+ }
34
+
35
+
36
+ def convert_to_unicode(text):
37
+ """
38
+ Converts `text` to Unicode (if it's not already), assuming UTF-8 input.
39
+ """
40
+
41
+ def ensure_text(s, encoding="utf-8", errors="strict"):
42
+ if isinstance(s, bytes):
43
+ return s.decode(encoding, errors)
44
+ elif isinstance(s, str):
45
+ return s
46
+ else:
47
+ raise TypeError(f"not expecting type '{type(s)}'")
48
+
49
+ return ensure_text(text, encoding="utf-8", errors="ignore")
50
+
51
+
52
+ # Copied from transformers.models.xlm.tokenization_xlm.get_pairs
53
+ def get_pairs(word):
54
+ """
55
+ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length
56
+ strings)
57
+ """
58
+ pairs = set()
59
+ prev_char = word[0]
60
+ for char in word[1:]:
61
+ pairs.add((prev_char, char))
62
+ prev_char = char
63
+ return pairs
64
+
65
+
66
+ # Copied from transformers.models.xlm.tokenization_xlm.replace_unicode_punct
67
+ def replace_unicode_punct(text):
68
+ """
69
+ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
70
+ """
71
+ text = text.replace(",", ",")
72
+ text = re.sub(r"。\s*", ". ", text)
73
+ text = text.replace("、", ",")
74
+ text = text.replace("”", '"')
75
+ text = text.replace("“", '"')
76
+ text = text.replace("∶", ":")
77
+ text = text.replace(":", ":")
78
+ text = text.replace("?", "?")
79
+ text = text.replace("《", '"')
80
+ text = text.replace("》", '"')
81
+ text = text.replace(")", ")")
82
+ text = text.replace("!", "!")
83
+ text = text.replace("(", "(")
84
+ text = text.replace(";", ";")
85
+ text = text.replace("1", "1")
86
+ text = text.replace("」", '"')
87
+ text = text.replace("「", '"')
88
+ text = text.replace("0", "0")
89
+ text = text.replace("3", "3")
90
+ text = text.replace("2", "2")
91
+ text = text.replace("5", "5")
92
+ text = text.replace("6", "6")
93
+ text = text.replace("9", "9")
94
+ text = text.replace("7", "7")
95
+ text = text.replace("8", "8")
96
+ text = text.replace("4", "4")
97
+ text = re.sub(r".\s*", ". ", text)
98
+ text = text.replace("~", "~")
99
+ text = text.replace("’", "'")
100
+ text = text.replace("…", "...")
101
+ text = text.replace("━", "-")
102
+ text = text.replace("〈", "<")
103
+ text = text.replace("〉", ">")
104
+ text = text.replace("【", "[")
105
+ text = text.replace("】", "]")
106
+ text = text.replace("%", "%")
107
+ return text
108
+
109
+
110
+ # Copied from transformers.models.xlm.tokenization_xlm.remove_non_printing_char
111
+ def remove_non_printing_char(text):
112
+ """
113
+ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl
114
+ """
115
+ output = []
116
+ for char in text:
117
+ cat = unicodedata.category(char)
118
+ if cat.startswith("C"):
119
+ continue
120
+ output.append(char)
121
+ return "".join(output)
122
+
123
+
124
+ class FlaubertTokenizer(PreTrainedTokenizer):
125
+ """
126
+ Construct a Flaubert tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:
127
+
128
+ - Moses preprocessing and tokenization.
129
+ - Normalizing all input text.
130
+ - The arguments `special_tokens` and the function `set_special_tokens` can be used to add additional symbols (like
131
+ "__classify__") to a vocabulary.
132
+ - The argument `do_lowercase` controls lower casing (automatically set for pretrained vocabularies).
133
+
134
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
135
+ this superclass for more information regarding those methods.
136
+
137
+ Args:
138
+ vocab_file (`str`):
139
+ Vocabulary file.
140
+ merges_file (`str`):
141
+ Merges file.
142
+ do_lowercase (`bool`, *optional*, defaults to `False`):
143
+ Controls lower casing.
144
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
145
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
146
+ token instead.
147
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
148
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
149
+
150
+ <Tip>
151
+
152
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
153
+ sequence. The token used is the `cls_token`.
154
+
155
+ </Tip>
156
+
157
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
158
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
159
+ sequence classification or for a text and a question for question answering. It is also used as the last
160
+ token of a sequence built with special tokens.
161
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
162
+ The token used for padding, for example when batching sequences of different lengths.
163
+ cls_token (`str`, *optional*, defaults to `"</s>"`):
164
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
165
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
166
+ mask_token (`str`, *optional*, defaults to `"<special1>"`):
167
+ The token used for masking values. This is the token used when training this model with masked language
168
+ modeling. This is the token which the model will try to predict.
169
+ additional_special_tokens (`List[str]`, *optional*, defaults to `['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>']`):
170
+ List of additional special tokens.
171
+ lang2id (`Dict[str, int]`, *optional*):
172
+ Dictionary mapping languages string identifiers to their IDs.
173
+ id2lang (`Dict[int, str]`, *optional*):
174
+ Dictionary mapping language IDs to their string identifiers.
175
+ """
176
+
177
+ vocab_files_names = VOCAB_FILES_NAMES
178
+
179
+ def __init__(
180
+ self,
181
+ vocab_file,
182
+ merges_file,
183
+ do_lowercase=False,
184
+ unk_token="<unk>",
185
+ bos_token="<s>",
186
+ sep_token="</s>",
187
+ pad_token="<pad>",
188
+ cls_token="</s>",
189
+ mask_token="<special1>",
190
+ additional_special_tokens=[
191
+ "<special0>",
192
+ "<special1>",
193
+ "<special2>",
194
+ "<special3>",
195
+ "<special4>",
196
+ "<special5>",
197
+ "<special6>",
198
+ "<special7>",
199
+ "<special8>",
200
+ "<special9>",
201
+ ],
202
+ lang2id=None,
203
+ id2lang=None,
204
+ **kwargs,
205
+ ):
206
+ do_lowercase_and_remove_accent = kwargs.pop("do_lowercase_and_remove_accent", None)
207
+ if do_lowercase_and_remove_accent is not None:
208
+ logger.warning(
209
+ "`do_lowercase_and_remove_accent` is passed as a keyword argument, but this won't do anything."
210
+ " `FlaubertTokenizer` will always set it to `False`."
211
+ )
212
+ # always `False`
213
+ self.do_lowercase_and_remove_accent = False
214
+
215
+ self.do_lowercase = do_lowercase
216
+
217
+ try:
218
+ import sacremoses
219
+ except ImportError:
220
+ raise ImportError(
221
+ "You need to install sacremoses to use FlaubertTokenizer. "
222
+ "See https://pypi.org/project/sacremoses/ for installation."
223
+ )
224
+
225
+ self.sm = sacremoses
226
+
227
+ # cache of sm.MosesPunctNormalizer instance
228
+ self.cache_moses_punct_normalizer = {}
229
+ # cache of sm.MosesTokenizer instance
230
+ self.cache_moses_tokenizer = {}
231
+ self.lang_with_custom_tokenizer = {"zh", "th", "ja"}
232
+ self.lang2id = lang2id
233
+ self.id2lang = id2lang
234
+ if lang2id is not None and id2lang is not None:
235
+ assert len(lang2id) == len(id2lang)
236
+
237
+ self.ja_word_tokenizer = None
238
+ self.zh_word_tokenizer = None
239
+
240
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
241
+ self.encoder = json.load(vocab_handle)
242
+ self.decoder = {v: k for k, v in self.encoder.items()}
243
+ with open(merges_file, encoding="utf-8") as merges_handle:
244
+ merges = merges_handle.read().split("\n")[:-1]
245
+ merges = [tuple(merge.split()[:2]) for merge in merges]
246
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
247
+ self.cache = {}
248
+
249
+ super().__init__(
250
+ unk_token=unk_token,
251
+ bos_token=bos_token,
252
+ sep_token=sep_token,
253
+ pad_token=pad_token,
254
+ cls_token=cls_token,
255
+ mask_token=mask_token,
256
+ additional_special_tokens=additional_special_tokens,
257
+ lang2id=lang2id,
258
+ id2lang=id2lang,
259
+ **kwargs,
260
+ )
261
+
262
+ @property
263
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.do_lower_case
264
+ def do_lower_case(self):
265
+ return self.do_lowercase_and_remove_accent
266
+
267
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_punct_norm
268
+ def moses_punct_norm(self, text, lang):
269
+ if lang not in self.cache_moses_punct_normalizer:
270
+ punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang)
271
+ self.cache_moses_punct_normalizer[lang] = punct_normalizer
272
+ else:
273
+ punct_normalizer = self.cache_moses_punct_normalizer[lang]
274
+ return punct_normalizer.normalize(text)
275
+
276
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_tokenize
277
+ def moses_tokenize(self, text, lang):
278
+ if lang not in self.cache_moses_tokenizer:
279
+ moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
280
+ self.cache_moses_tokenizer[lang] = moses_tokenizer
281
+ else:
282
+ moses_tokenizer = self.cache_moses_tokenizer[lang]
283
+ return moses_tokenizer.tokenize(text, return_str=False, escape=False)
284
+
285
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_pipeline
286
+ def moses_pipeline(self, text, lang):
287
+ text = replace_unicode_punct(text)
288
+ text = self.moses_punct_norm(text, lang)
289
+ text = remove_non_printing_char(text)
290
+ return text
291
+
292
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.ja_tokenize
293
+ def ja_tokenize(self, text):
294
+ if self.ja_word_tokenizer is None:
295
+ try:
296
+ import Mykytea
297
+
298
+ self.ja_word_tokenizer = Mykytea.Mykytea(
299
+ f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin"
300
+ )
301
+ except (AttributeError, ImportError):
302
+ logger.error(
303
+ "Make sure you install KyTea (https://github.com/neubig/kytea) and it's python wrapper"
304
+ " (https://github.com/chezou/Mykytea-python) with the following steps"
305
+ )
306
+ logger.error("1. git clone [email protected]:neubig/kytea.git && cd kytea")
307
+ logger.error("2. autoreconf -i")
308
+ logger.error("3. ./configure --prefix=$HOME/local")
309
+ logger.error("4. make && make install")
310
+ logger.error("5. pip install kytea")
311
+ raise
312
+ return list(self.ja_word_tokenizer.getWS(text))
313
+
314
+ @property
315
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.vocab_size
316
+ def vocab_size(self):
317
+ return len(self.encoder)
318
+
319
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_vocab
320
+ def get_vocab(self):
321
+ return dict(self.encoder, **self.added_tokens_encoder)
322
+
323
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.bpe
324
+ def bpe(self, token):
325
+ word = tuple(token[:-1]) + (token[-1] + "</w>",)
326
+ if token in self.cache:
327
+ return self.cache[token]
328
+ pairs = get_pairs(word)
329
+
330
+ if not pairs:
331
+ return token + "</w>"
332
+
333
+ while True:
334
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
335
+ if bigram not in self.bpe_ranks:
336
+ break
337
+ first, second = bigram
338
+ new_word = []
339
+ i = 0
340
+ while i < len(word):
341
+ try:
342
+ j = word.index(first, i)
343
+ except ValueError:
344
+ new_word.extend(word[i:])
345
+ break
346
+ else:
347
+ new_word.extend(word[i:j])
348
+ i = j
349
+
350
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
351
+ new_word.append(first + second)
352
+ i += 2
353
+ else:
354
+ new_word.append(word[i])
355
+ i += 1
356
+ new_word = tuple(new_word)
357
+ word = new_word
358
+ if len(word) == 1:
359
+ break
360
+ else:
361
+ pairs = get_pairs(word)
362
+ word = " ".join(word)
363
+ if word == "\n </w>":
364
+ word = "\n</w>"
365
+ self.cache[token] = word
366
+ return word
367
+
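To make the `</w>` end-of-word convention in `bpe` above concrete, here is a toy walk-through (the merge table is invented purely for illustration):

    # hypothetical merge ranks; a lower rank is merged earlier
    bpe_ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}
    # the token "low" starts as the symbol tuple ('l', 'o', 'w</w>')
    # the rank-0 merge gives ('lo', 'w</w>'); the rank-1 merge gives ('low</w>',)
    # with this table, bpe("low") would therefore return "low</w>"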
368
+ def preprocess_text(self, text):
369
+ text = text.replace("``", '"').replace("''", '"')
370
+ text = convert_to_unicode(text)
371
+ text = unicodedata.normalize("NFC", text)
372
+
373
+ if self.do_lowercase:
374
+ text = text.lower()
375
+
376
+ return text
377
+
378
+ def _tokenize(self, text, bypass_tokenizer=False):
379
+ """
380
+ Tokenize a string given language code using Moses.
381
+
382
+ Details of tokenization:
383
+
384
+ - [sacremoses](https://github.com/alvations/sacremoses): port of Moses
385
+ - Install with `pip install sacremoses`
386
+
387
+ Args:
388
+ - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
389
+ (bool). If True, we only apply BPE.
390
+
391
+ Returns:
392
+ List of tokens.
393
+ """
394
+ lang = "fr"
395
+ if lang and self.lang2id and lang not in self.lang2id:
396
+ logger.error(
397
+ "Supplied language code not found in lang2id mapping. Please check that your language is supported by"
398
+ " the loaded pretrained model."
399
+ )
400
+
401
+ if bypass_tokenizer:
402
+ text = text.split()
403
+ else:
404
+ text = self.preprocess_text(text)
405
+ text = self.moses_pipeline(text, lang=lang)
406
+ text = self.moses_tokenize(text, lang=lang)
407
+
408
+ split_tokens = []
409
+ for token in text:
410
+ if token:
411
+ split_tokens.extend(list(self.bpe(token).split(" ")))
412
+
413
+ return split_tokens
414
+
415
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_token_to_id
416
+ def _convert_token_to_id(self, token):
417
+ """Converts a token (str) in an id using the vocab."""
418
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
419
+
420
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_id_to_token
421
+ def _convert_id_to_token(self, index):
422
+ """Converts an index (integer) in a token (str) using the vocab."""
423
+ return self.decoder.get(index, self.unk_token)
424
+
425
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.convert_tokens_to_string
426
+ def convert_tokens_to_string(self, tokens):
427
+ """Converts a sequence of tokens (string) in a single string."""
428
+ out_string = "".join(tokens).replace("</w>", " ").strip()
429
+ return out_string
430
+
431
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.build_inputs_with_special_tokens
432
+ def build_inputs_with_special_tokens(
433
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
434
+ ) -> List[int]:
435
+ """
436
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
437
+ adding special tokens. An XLM sequence has the following format:
438
+
439
+ - single sequence: `<s> X </s>`
440
+ - pair of sequences: `<s> A </s> B </s>`
441
+
442
+ Args:
443
+ token_ids_0 (`List[int]`):
444
+ List of IDs to which the special tokens will be added.
445
+ token_ids_1 (`List[int]`, *optional*):
446
+ Optional second list of IDs for sequence pairs.
447
+
448
+ Returns:
449
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
450
+
451
+ """
452
+ bos = [self.bos_token_id]
453
+ sep = [self.sep_token_id]
454
+
455
+ if token_ids_1 is None:
456
+ return bos + token_ids_0 + sep
457
+ return bos + token_ids_0 + sep + token_ids_1 + sep
458
+
459
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_special_tokens_mask
460
+ def get_special_tokens_mask(
461
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
462
+ ) -> List[int]:
463
+ """
464
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
465
+ special tokens using the tokenizer `prepare_for_model` method.
466
+
467
+ Args:
468
+ token_ids_0 (`List[int]`):
469
+ List of IDs.
470
+ token_ids_1 (`List[int]`, *optional*):
471
+ Optional second list of IDs for sequence pairs.
472
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
473
+ Whether or not the token list is already formatted with special tokens for the model.
474
+
475
+ Returns:
476
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
477
+ """
478
+
479
+ if already_has_special_tokens:
480
+ return super().get_special_tokens_mask(
481
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
482
+ )
483
+
484
+ if token_ids_1 is not None:
485
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
486
+ return [1] + ([0] * len(token_ids_0)) + [1]
487
+
488
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.create_token_type_ids_from_sequences
489
+ def create_token_type_ids_from_sequences(
490
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
491
+ ) -> List[int]:
492
+ """
493
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLM sequence
494
+ pair mask has the following format:
495
+
496
+ ```
497
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
498
+ | first sequence | second sequence |
499
+ ```
500
+
501
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
502
+
503
+ Args:
504
+ token_ids_0 (`List[int]`):
505
+ List of IDs.
506
+ token_ids_1 (`List[int]`, *optional*):
507
+ Optional second list of IDs for sequence pairs.
508
+
509
+ Returns:
510
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
511
+ """
512
+ sep = [self.sep_token_id]
513
+ cls = [self.cls_token_id]
514
+ if token_ids_1 is None:
515
+ return len(cls + token_ids_0 + sep) * [0]
516
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
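To make the three special-token helpers above concrete, here is a toy walk-through with made-up ids (0 standing in for `<s>`/CLS and 1 for `</s>`); the real ids come from the tokenizer's vocabulary, so treat this purely as an illustration of the layout:

```python
bos, sep = [0], [1]
ids_a, ids_b = [11, 12], [21, 22, 23]

inputs = bos + ids_a + sep + ids_b + sep                                # <s> A </s> B </s>
special_mask = [1] + [0] * len(ids_a) + [1] + [0] * len(ids_b) + [1]    # 1 marks special tokens
token_type_ids = len(bos + ids_a + sep) * [0] + len(ids_b + sep) * [1]  # 0s then 1s

print(inputs)          # [0, 11, 12, 1, 21, 22, 23, 1]
print(special_mask)    # [1, 0, 0, 1, 0, 0, 0, 1]
print(token_type_ids)  # [0, 0, 0, 0, 1, 1, 1, 1]
```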
517
+
518
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.save_vocabulary
519
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
520
+ if not os.path.isdir(save_directory):
521
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
522
+ return
523
+ vocab_file = os.path.join(
524
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
525
+ )
526
+ merge_file = os.path.join(
527
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
528
+ )
529
+
530
+ with open(vocab_file, "w", encoding="utf-8") as f:
531
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
532
+
533
+ index = 0
534
+ with open(merge_file, "w", encoding="utf-8") as writer:
535
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
536
+ if index != token_index:
537
+ logger.warning(
538
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
539
+ " Please check that the tokenizer is not corrupted!"
540
+ )
541
+ index = token_index
542
+ writer.write(" ".join(bpe_tokens) + "\n")
543
+ index += 1
544
+
545
+ return vocab_file, merge_file
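A toy illustration of the two files written above, using hypothetical tokens and a temporary directory: the vocabulary is a JSON mapping from token to id, and the merges file lists one BPE merge per line in rank order:

```python
import json, os, tempfile

encoder = {"h</w>": 0, "e": 1, "he": 2}      # hypothetical vocab
bpe_ranks = {("h", "e"): 0, ("he", "l"): 1}  # hypothetical merges

save_directory = tempfile.mkdtemp()
with open(os.path.join(save_directory, "vocab.json"), "w", encoding="utf-8") as f:
    f.write(json.dumps(encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
with open(os.path.join(save_directory, "merges.txt"), "w", encoding="utf-8") as writer:
    for bpe_tokens, _rank in sorted(bpe_ranks.items(), key=lambda kv: kv[1]):
        writer.write(" ".join(bpe_tokens) + "\n")  # writes "h e" then "he l"
```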
546
+
547
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__getstate__
548
+ def __getstate__(self):
549
+ state = self.__dict__.copy()
550
+ state["sm"] = None
551
+ return state
552
+
553
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__setstate__
554
+ def __setstate__(self, d):
555
+ self.__dict__ = d
556
+
557
+ try:
558
+ import sacremoses
559
+ except ImportError:
560
+ raise ImportError(
561
+ "You need to install sacremoses to use XLMTokenizer. "
562
+ "See https://pypi.org/project/sacremoses/ for installation."
563
+ )
564
+
565
+ self.sm = sacremoses
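A minimal sketch of the pickling pattern used by `__getstate__`/`__setstate__` above: the `sacremoses` module handle is dropped before pickling and re-imported on unpickling, so serialized tokenizers stay portable. `Holder` is a hypothetical stand-in, not the actual tokenizer class, and it assumes `sacremoses` is installed:

```python
import pickle

class Holder:
    def __init__(self):
        import sacremoses  # requires `pip install sacremoses`
        self.sm = sacremoses

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sm"] = None  # module objects cannot be pickled
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        import sacremoses
        self.sm = sacremoses  # re-acquire the module handle after unpickling

restored = pickle.loads(pickle.dumps(Holder()))
assert restored.sm is not None
```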
venv/lib/python3.10/site-packages/transformers/models/mvp/__init__.py ADDED
@@ -0,0 +1,79 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
21
+ "tokenization_mvp": ["MvpTokenizer"],
22
+ }
23
+
24
+ try:
25
+ if not is_tokenizers_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
31
+
32
+ try:
33
+ if not is_torch_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ _import_structure["modeling_mvp"] = [
39
+ "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
40
+ "MvpForCausalLM",
41
+ "MvpForConditionalGeneration",
42
+ "MvpForQuestionAnswering",
43
+ "MvpForSequenceClassification",
44
+ "MvpModel",
45
+ "MvpPreTrainedModel",
46
+ ]
47
+
48
+ if TYPE_CHECKING:
49
+ from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
50
+ from .tokenization_mvp import MvpTokenizer
51
+
52
+ try:
53
+ if not is_tokenizers_available():
54
+ raise OptionalDependencyNotAvailable()
55
+ except OptionalDependencyNotAvailable:
56
+ pass
57
+ else:
58
+ from .tokenization_mvp_fast import MvpTokenizerFast
59
+
60
+ try:
61
+ if not is_torch_available():
62
+ raise OptionalDependencyNotAvailable()
63
+ except OptionalDependencyNotAvailable:
64
+ pass
65
+ else:
66
+ from .modeling_mvp import (
67
+ MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
68
+ MvpForCausalLM,
69
+ MvpForConditionalGeneration,
70
+ MvpForQuestionAnswering,
71
+ MvpForSequenceClassification,
72
+ MvpModel,
73
+ MvpPreTrainedModel,
74
+ )
75
+
76
+ else:
77
+ import sys
78
+
79
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
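With the lazy-module pattern above, nothing under `transformers/models/mvp` is imported until one of the exported names is first accessed; the `_LazyModule` installed into `sys.modules` resolves attributes on demand. A minimal sketch, assuming `transformers` is installed:

```python
# Importing the package is cheap; configuration_mvp is only loaded when MvpConfig is touched.
from transformers.models.mvp import MvpConfig

config = MvpConfig()
print(config.model_type)  # "mvp"
```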
venv/lib/python3.10/site-packages/transformers/models/mvp/__pycache__/configuration_mvp.cpython-310.pyc ADDED
Binary file (7.02 kB).
 
venv/lib/python3.10/site-packages/transformers/models/mvp/__pycache__/modeling_mvp.cpython-310.pyc ADDED
Binary file (64.3 kB).
 
venv/lib/python3.10/site-packages/transformers/models/mvp/__pycache__/tokenization_mvp.cpython-310.pyc ADDED
Binary file (15.1 kB).
 
venv/lib/python3.10/site-packages/transformers/models/mvp/__pycache__/tokenization_mvp_fast.cpython-310.pyc ADDED
Binary file (9.57 kB).
 
venv/lib/python3.10/site-packages/transformers/models/mvp/configuration_mvp.py ADDED
@@ -0,0 +1,179 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ MVP model configuration"""
16
+ import warnings
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ class MvpConfig(PretrainedConfig):
26
+ r"""
27
+ This is the configuration class to store the configuration of a [`MvpModel`]. It is used to instantiate an MVP model
28
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
29
+ defaults will yield a similar configuration to that of the MVP [RUCAIBox/mvp](https://huggingface.co/RUCAIBox/mvp)
30
+ architecture.
31
+
32
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
33
+ documentation from [`PretrainedConfig`] for more information.
34
+
35
+
36
+ Args:
37
+ vocab_size (`int`, *optional*, defaults to 50267):
38
+ Vocabulary size of the MVP model. Defines the number of different tokens that can be represented by the
39
+ `inputs_ids` passed when calling [`MvpModel`].
40
+ d_model (`int`, *optional*, defaults to 1024):
41
+ Dimensionality of the layers and the pooler layer.
42
+ encoder_layers (`int`, *optional*, defaults to 12):
43
+ Number of encoder layers.
44
+ decoder_layers (`int`, *optional*, defaults to 12):
45
+ Number of decoder layers.
46
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
47
+ Number of attention heads for each attention layer in the Transformer encoder.
48
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
49
+ Number of attention heads for each attention layer in the Transformer decoder.
50
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
51
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
52
+ encoder_ffn_dim (`int`, *optional*, defaults to 4096):
53
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
54
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
55
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
56
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
57
+ dropout (`float`, *optional*, defaults to 0.1):
58
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
59
+ attention_dropout (`float`, *optional*, defaults to 0.0):
60
+ The dropout ratio for the attention probabilities.
61
+ activation_dropout (`float`, *optional*, defaults to 0.0):
62
+ The dropout ratio for activations inside the fully connected layer.
63
+ classifier_dropout (`float`, *optional*, defaults to 0.0):
64
+ The dropout ratio for classifier.
65
+ max_position_embeddings (`int`, *optional*, defaults to 1024):
66
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
67
+ just in case (e.g., 512 or 1024 or 2048).
68
+ init_std (`float`, *optional*, defaults to 0.02):
69
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
70
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
71
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
72
+ for more details.
73
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
74
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
75
+ for more details.
76
+ scale_embedding (`bool`, *optional*, defaults to `False`):
77
+ Scale embeddings by dividing by sqrt(d_model).
78
+ use_cache (`bool`, *optional*, defaults to `True`):
79
+ Whether or not the model should return the last key/values attentions (not used by all models).
80
+ forced_eos_token_id (`int`, *optional*, defaults to 2):
81
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
82
+ `eos_token_id`.
83
+ use_prompt (`bool`, *optional*, defaults to `False`):
84
+ Whether or not to use prompt.
85
+ prompt_length (`int`, *optional*, defaults to 100):
86
+ The length of prompt.
87
+ prompt_mid_dim (`int`, *optional*, defaults to 800):
88
+ Dimensionality of the "intermediate" layer in prompt.
89
+ Example:
90
+
91
+ ```python
92
+ >>> from transformers import MvpConfig, MvpModel
93
+
94
+ >>> # Initializing a MVP RUCAIBox/mvp style configuration
95
+ >>> configuration = MvpConfig()
96
+
97
+ >>> # Initializing a model (with random weights) from the RUCAIBox/mvp style configuration
98
+ >>> model = MvpModel(configuration)
99
+
100
+ >>> # Accessing the model configuration
101
+ >>> configuration = model.config
102
+ ```"""
103
+
104
+ model_type = "mvp"
105
+ keys_to_ignore_at_inference = ["past_key_values"]
106
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
107
+
108
+ def __init__(
109
+ self,
110
+ vocab_size=50267,
111
+ max_position_embeddings=1024,
112
+ encoder_layers=12,
113
+ encoder_ffn_dim=4096,
114
+ encoder_attention_heads=16,
115
+ decoder_layers=12,
116
+ decoder_ffn_dim=4096,
117
+ decoder_attention_heads=16,
118
+ encoder_layerdrop=0.0,
119
+ decoder_layerdrop=0.0,
120
+ activation_function="gelu",
121
+ d_model=1024,
122
+ dropout=0.1,
123
+ attention_dropout=0.0,
124
+ activation_dropout=0.0,
125
+ init_std=0.02,
126
+ classifier_dropout=0.0,
127
+ scale_embedding=False,
128
+ use_cache=True,
129
+ pad_token_id=1,
130
+ bos_token_id=0,
131
+ eos_token_id=2,
132
+ is_encoder_decoder=True,
133
+ decoder_start_token_id=2,
134
+ forced_eos_token_id=2,
135
+ use_prompt=False,
136
+ prompt_length=100,
137
+ prompt_mid_dim=800,
138
+ **kwargs,
139
+ ):
140
+ self.vocab_size = vocab_size
141
+ self.max_position_embeddings = max_position_embeddings
142
+ self.d_model = d_model
143
+ self.encoder_ffn_dim = encoder_ffn_dim
144
+ self.encoder_layers = encoder_layers
145
+ self.encoder_attention_heads = encoder_attention_heads
146
+ self.decoder_ffn_dim = decoder_ffn_dim
147
+ self.decoder_layers = decoder_layers
148
+ self.decoder_attention_heads = decoder_attention_heads
149
+ self.dropout = dropout
150
+ self.attention_dropout = attention_dropout
151
+ self.activation_dropout = activation_dropout
152
+ self.activation_function = activation_function
153
+ self.init_std = init_std
154
+ self.encoder_layerdrop = encoder_layerdrop
155
+ self.decoder_layerdrop = decoder_layerdrop
156
+ self.classifier_dropout = classifier_dropout
157
+ self.use_cache = use_cache
158
+ self.num_hidden_layers = encoder_layers
159
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
160
+ self.use_prompt = use_prompt
161
+ self.prompt_length = prompt_length
162
+ self.prompt_mid_dim = prompt_mid_dim
163
+
164
+ super().__init__(
165
+ pad_token_id=pad_token_id,
166
+ bos_token_id=bos_token_id,
167
+ eos_token_id=eos_token_id,
168
+ is_encoder_decoder=is_encoder_decoder,
169
+ decoder_start_token_id=decoder_start_token_id,
170
+ forced_eos_token_id=forced_eos_token_id,
171
+ **kwargs,
172
+ )
173
+
174
+ if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
175
+ self.forced_bos_token_id = self.bos_token_id
176
+ warnings.warn(
177
+ f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
178
+ "The config can simply be saved and uploaded again to be fixed."
179
+ )
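A small sketch of two behaviours defined above, assuming `transformers` is installed: `attribute_map` aliases the generic names onto the MVP-specific attributes, and the legacy `force_bos_token_to_be_generated` kwarg is converted into `forced_bos_token_id` with the warning shown:

```python
from transformers import MvpConfig

config = MvpConfig()
assert config.hidden_size == config.d_model  # aliased via attribute_map
assert config.num_attention_heads == config.encoder_attention_heads

legacy = MvpConfig(force_bos_token_to_be_generated=True)  # triggers the warning above
assert legacy.forced_bos_token_id == legacy.bos_token_id
```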
venv/lib/python3.10/site-packages/transformers/models/mvp/modeling_mvp.py ADDED
@@ -0,0 +1,2009 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch MVP model."""
16
+ import copy
17
+ import math
18
+ from typing import List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
27
+ from ...modeling_outputs import (
28
+ BaseModelOutput,
29
+ BaseModelOutputWithPastAndCrossAttentions,
30
+ CausalLMOutputWithCrossAttentions,
31
+ Seq2SeqLMOutput,
32
+ Seq2SeqModelOutput,
33
+ Seq2SeqQuestionAnsweringModelOutput,
34
+ Seq2SeqSequenceClassifierOutput,
35
+ )
36
+ from ...modeling_utils import PreTrainedModel
37
+ from ...utils import (
38
+ add_code_sample_docstrings,
39
+ add_end_docstrings,
40
+ add_start_docstrings,
41
+ add_start_docstrings_to_model_forward,
42
+ logging,
43
+ replace_return_docstrings,
44
+ )
45
+ from .configuration_mvp import MvpConfig
46
+
47
+
48
+ logger = logging.get_logger(__name__)
49
+
50
+ _CHECKPOINT_FOR_DOC = "RUCAIBox/mvp"
51
+ _CONFIG_FOR_DOC = "MvpConfig"
52
+
53
+ # Base model docstring
54
+ _EXPECTED_OUTPUT_SHAPE = [1, 8, 1024]
55
+
56
+
57
+ from ..deprecated._archive_maps import MVP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
58
+
59
+
60
+ # Copied from transformers.models.bart.modeling_bart.shift_tokens_right
61
+ def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
62
+ """
63
+ Shift input ids one token to the right.
64
+ """
65
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
66
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
67
+ shifted_input_ids[:, 0] = decoder_start_token_id
68
+
69
+ if pad_token_id is None:
70
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
71
+ # replace possible -100 values in labels by `pad_token_id`
72
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
73
+
74
+ return shifted_input_ids
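A quick check of the shifting logic above (requires torch): labels are shifted right by one position, the decoder start token is prepended, and any `-100` padding in the labels is replaced by the real pad id:

```python
import torch

labels = torch.tensor([[13, 14, 15, 2, -100]])  # -100 marks ignored label positions
pad_token_id, decoder_start_token_id = 1, 2

shifted = labels.new_zeros(labels.shape)
shifted[:, 1:] = labels[:, :-1].clone()
shifted[:, 0] = decoder_start_token_id
shifted.masked_fill_(shifted == -100, pad_token_id)

print(shifted)  # tensor([[ 2, 13, 14, 15,  2]])
```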
75
+
76
+
77
+ # Copied from transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding with Bart->MVP
78
+ class MvpLearnedPositionalEmbedding(nn.Embedding):
79
+ """
80
+ This module learns positional embeddings up to a fixed maximum size.
81
+ """
82
+
83
+ def __init__(self, num_embeddings: int, embedding_dim: int):
84
+ # MVP is set up so that if padding_idx is specified then offset the embedding ids by 2
85
+ # and adjust num_embeddings appropriately. Other models don't have this hack
86
+ self.offset = 2
87
+ super().__init__(num_embeddings + self.offset, embedding_dim)
88
+
89
+ def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
90
+ """`input_ids' shape is expected to be [bsz x seqlen]."""
91
+
92
+ bsz, seq_len = input_ids.shape[:2]
93
+ positions = torch.arange(
94
+ past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
95
+ ).expand(bsz, -1)
96
+
97
+ return super().forward(positions + self.offset)
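An illustration of the position indexing above: positions start at `past_key_values_length` and are then shifted by the fixed offset of 2 before the embedding lookup (a convention inherited from BART-style checkpoints):

```python
import torch

offset, past_key_values_length, seq_len = 2, 3, 4
positions = torch.arange(past_key_values_length, past_key_values_length + seq_len)
print(positions + offset)  # tensor([5, 6, 7, 8]) -> rows looked up in the embedding table
```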
98
+
99
+
100
+ class MvpAttention(nn.Module):
101
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
102
+
103
+ def __init__(
104
+ self,
105
+ embed_dim: int,
106
+ num_heads: int,
107
+ dropout: float = 0.0,
108
+ is_decoder: bool = False,
109
+ bias: bool = True,
110
+ ):
111
+ super().__init__()
112
+ self.embed_dim = embed_dim
113
+ self.num_heads = num_heads
114
+ self.dropout = dropout
115
+ self.head_dim = embed_dim // num_heads
116
+
117
+ if (self.head_dim * num_heads) != self.embed_dim:
118
+ raise ValueError(
119
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
120
+ f" and `num_heads`: {num_heads})."
121
+ )
122
+ self.scaling = self.head_dim**-0.5
123
+ self.is_decoder = is_decoder
124
+
125
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
126
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
127
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
128
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
129
+
130
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
131
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
132
+
133
+ def forward(
134
+ self,
135
+ hidden_states: torch.Tensor,
136
+ key_value_states: Optional[torch.Tensor] = None,
137
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
138
+ attention_mask: Optional[torch.Tensor] = None,
139
+ layer_head_mask: Optional[torch.Tensor] = None,
140
+ attn_prompt: Optional[torch.Tensor] = None,
141
+ output_attentions: bool = False,
142
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
143
+ """Input shape: Batch x Time x Channel"""
144
+
145
+ # if key_value_states are provided this layer is used as a cross-attention layer
146
+ # for the decoder
147
+ is_cross_attention = key_value_states is not None
148
+
149
+ bsz, tgt_len, _ = hidden_states.size()
150
+
151
+ # get query proj
152
+ query_states = self.q_proj(hidden_states) * self.scaling
153
+ # get key, value proj
154
+ if is_cross_attention and past_key_value is not None:
155
+ # reuse k,v, cross_attentions
156
+ key_states = past_key_value[0]
157
+ value_states = past_key_value[1]
158
+ elif is_cross_attention:
159
+ # cross_attentions
160
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
161
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
162
+ elif past_key_value is not None:
163
+ # reuse k, v, self_attention
164
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
165
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
166
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
167
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
168
+ else:
169
+ # self_attention
170
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
171
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
172
+
173
+ if self.is_decoder:
174
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
175
+ # Further calls to cross_attention layer can then reuse all cross-attention
176
+ # key/value_states (first "if" case)
177
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
178
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
179
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
180
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
181
+ past_key_value = (key_states, value_states)
182
+
183
+ if attn_prompt is not None:
184
+ key_states = torch.cat([attn_prompt[0].expand(bsz, -1, -1, -1), key_states], dim=2)
185
+ value_states = torch.cat([attn_prompt[1].expand(bsz, -1, -1, -1), value_states], dim=2)
186
+ if attention_mask is not None:
187
+ prompt_mask = torch.zeros(bsz, 1, tgt_len, attn_prompt[0].size(1)).to(attention_mask.device)
188
+ attention_mask = torch.cat([prompt_mask, attention_mask], dim=(-1))
189
+
190
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
191
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
192
+ key_states = key_states.view(*proj_shape)
193
+ value_states = value_states.view(*proj_shape)
194
+
195
+ src_len = key_states.size(1)
196
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
197
+
198
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
199
+ raise ValueError(
200
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
201
+ f" {attn_weights.size()}"
202
+ )
203
+
204
+ if attention_mask is not None:
205
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
206
+ raise ValueError(
207
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
208
+ )
209
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
210
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
211
+
212
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
213
+
214
+ if layer_head_mask is not None:
215
+ if layer_head_mask.size() != (self.num_heads,):
216
+ raise ValueError(
217
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
218
+ f" {layer_head_mask.size()}"
219
+ )
220
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
221
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
222
+
223
+ if output_attentions:
224
+ # this operation is a bit awkward, but it's required to
225
+ # make sure that attn_weights keeps its gradient.
226
+ # In order to do so, attn_weights have to be reshaped
227
+ # twice and have to be reused in the following
228
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
229
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
230
+ else:
231
+ attn_weights_reshaped = None
232
+
233
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
234
+
235
+ attn_output = torch.bmm(attn_probs, value_states)
236
+
237
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
238
+ raise ValueError(
239
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
240
+ f" {attn_output.size()}"
241
+ )
242
+
243
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
244
+ attn_output = attn_output.transpose(1, 2)
245
+
246
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
247
+ # partitioned across GPUs when using tensor-parallelism.
248
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
249
+
250
+ attn_output = self.out_proj(attn_output)
251
+
252
+ return attn_output, attn_weights_reshaped, past_key_value
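A shape-only sketch of the reshaping done by `_shape` and the batched matmul above, with toy dimensions (batch 2, 4 heads, head_dim 8, target length 5, source length 7); random tensors stand in for projected queries and keys, so this demonstrates shapes only, not trained behaviour:

```python
import torch

bsz, num_heads, head_dim, tgt_len, src_len = 2, 4, 8, 5, 7
embed_dim = num_heads * head_dim

q = torch.randn(bsz, tgt_len, embed_dim)
# _shape: (bsz, len, embed_dim) -> (bsz, num_heads, len, head_dim)
q = q.view(bsz, tgt_len, num_heads, head_dim).transpose(1, 2).contiguous()
k = torch.randn(bsz, num_heads, src_len, head_dim)

proj_shape = (bsz * num_heads, -1, head_dim)
attn_weights = torch.bmm(q.view(*proj_shape), k.view(*proj_shape).transpose(1, 2))
print(attn_weights.shape)  # torch.Size([8, 5, 7]) == (bsz * num_heads, tgt_len, src_len)
```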
253
+
254
+
255
+ class MvpEncoderLayer(nn.Module):
256
+ def __init__(self, config: MvpConfig):
257
+ super().__init__()
258
+ self.embed_dim = config.d_model
259
+ self.self_attn = MvpAttention(
260
+ embed_dim=self.embed_dim,
261
+ num_heads=config.encoder_attention_heads,
262
+ dropout=config.attention_dropout,
263
+ )
264
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
265
+ self.dropout = config.dropout
266
+ self.activation_fn = ACT2FN[config.activation_function]
267
+ self.activation_dropout = config.activation_dropout
268
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
269
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
270
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
271
+
272
+ def forward(
273
+ self,
274
+ hidden_states: torch.FloatTensor,
275
+ attention_mask: torch.FloatTensor,
276
+ layer_head_mask: torch.FloatTensor,
277
+ self_attn_prompt: torch.FloatTensor,
278
+ output_attentions: Optional[bool] = False,
279
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
280
+ """
281
+ Args:
282
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
283
+ attention_mask (`torch.FloatTensor`): attention mask of size
284
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
285
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
286
+ `(encoder_attention_heads,)`.
287
+ self_attn_prompt (`torch.FloatTensor`): prompt of self attention of shape
288
+ `(2, encoder_attention_heads, pro_len, head_dim)`.
289
+ output_attentions (`bool`, *optional*):
290
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
291
+ returned tensors for more detail.
292
+ """
293
+ residual = hidden_states
294
+ hidden_states, attn_weights, _ = self.self_attn(
295
+ hidden_states=hidden_states,
296
+ attention_mask=attention_mask,
297
+ layer_head_mask=layer_head_mask,
298
+ attn_prompt=self_attn_prompt,
299
+ output_attentions=output_attentions,
300
+ )
301
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
302
+ hidden_states = residual + hidden_states
303
+ hidden_states = self.self_attn_layer_norm(hidden_states)
304
+
305
+ residual = hidden_states
306
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
307
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
308
+ hidden_states = self.fc2(hidden_states)
309
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
310
+ hidden_states = residual + hidden_states
311
+ hidden_states = self.final_layer_norm(hidden_states)
312
+
313
+ if hidden_states.dtype == torch.float16 and (
314
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
315
+ ):
316
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
317
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
318
+
319
+ outputs = (hidden_states,)
320
+
321
+ if output_attentions:
322
+ outputs += (attn_weights,)
323
+
324
+ return outputs
325
+
326
+
327
+ class MvpDecoderLayer(nn.Module):
328
+ def __init__(self, config: MvpConfig):
329
+ super().__init__()
330
+ self.embed_dim = config.d_model
331
+
332
+ self.self_attn = MvpAttention(
333
+ embed_dim=self.embed_dim,
334
+ num_heads=config.decoder_attention_heads,
335
+ dropout=config.attention_dropout,
336
+ is_decoder=True,
337
+ )
338
+ self.dropout = config.dropout
339
+ self.activation_fn = ACT2FN[config.activation_function]
340
+ self.activation_dropout = config.activation_dropout
341
+
342
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
343
+ self.encoder_attn = MvpAttention(
344
+ self.embed_dim,
345
+ config.decoder_attention_heads,
346
+ dropout=config.attention_dropout,
347
+ is_decoder=True,
348
+ )
349
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
350
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
351
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
352
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
353
+
354
+ def forward(
355
+ self,
356
+ hidden_states: torch.Tensor,
357
+ attention_mask: Optional[torch.Tensor] = None,
358
+ encoder_hidden_states: Optional[torch.Tensor] = None,
359
+ encoder_attention_mask: Optional[torch.Tensor] = None,
360
+ layer_head_mask: Optional[torch.Tensor] = None,
361
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
362
+ self_attn_prompt: Optional[torch.Tensor] = None,
363
+ cross_attn_prompt: Optional[torch.Tensor] = None,
364
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
365
+ output_attentions: Optional[bool] = False,
366
+ use_cache: Optional[bool] = True,
367
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
368
+ """
369
+ Args:
370
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
371
+ attention_mask (`torch.FloatTensor`): attention mask of size
372
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
373
+ encoder_hidden_states (`torch.FloatTensor`):
374
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
375
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
376
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
377
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
378
+ `(encoder_attention_heads,)`.
379
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
380
+ size `(decoder_attention_heads,)`.
381
+ self_attn_prompt (`torch.FloatTensor`): prompt of self attention of shape
382
+ `(2, decoder_attention_heads, pro_len, head_dim)`.
383
+ cross_attn_prompt (`torch.FloatTensor`): prompt of cross attention of shape
384
+ `(2, decoder_attention_heads, pro_len, head_dim)`.
385
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
386
+ output_attentions (`bool`, *optional*):
387
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
388
+ returned tensors for more detail.
389
+ """
390
+ residual = hidden_states
391
+
392
+ # Self Attention
393
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
394
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
395
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
396
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
397
+ hidden_states=hidden_states,
398
+ past_key_value=self_attn_past_key_value,
399
+ attention_mask=attention_mask,
400
+ layer_head_mask=layer_head_mask,
401
+ attn_prompt=self_attn_prompt,
402
+ output_attentions=output_attentions,
403
+ )
404
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
405
+ hidden_states = residual + hidden_states
406
+ hidden_states = self.self_attn_layer_norm(hidden_states)
407
+
408
+ # Cross-Attention Block
409
+ cross_attn_present_key_value = None
410
+ cross_attn_weights = None
411
+ if encoder_hidden_states is not None:
412
+ residual = hidden_states
413
+
414
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
415
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
416
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
417
+ hidden_states=hidden_states,
418
+ key_value_states=encoder_hidden_states,
419
+ attention_mask=encoder_attention_mask,
420
+ layer_head_mask=cross_attn_layer_head_mask,
421
+ attn_prompt=cross_attn_prompt,
422
+ past_key_value=cross_attn_past_key_value,
423
+ output_attentions=output_attentions,
424
+ )
425
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
426
+ hidden_states = residual + hidden_states
427
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
428
+
429
+ # add cross-attn to positions 3,4 of present_key_value tuple
430
+ present_key_value = present_key_value + cross_attn_present_key_value
431
+
432
+ # Fully Connected
433
+ residual = hidden_states
434
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
435
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
436
+ hidden_states = self.fc2(hidden_states)
437
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
438
+ hidden_states = residual + hidden_states
439
+ hidden_states = self.final_layer_norm(hidden_states)
440
+
441
+ outputs = (hidden_states,)
442
+
443
+ if output_attentions:
444
+ outputs += (self_attn_weights, cross_attn_weights)
445
+
446
+ if use_cache:
447
+ outputs += (present_key_value,)
448
+
449
+ return outputs
450
+
451
+
452
+ # Copied from transformers.models.bart.modeling_bart.BartClassificationHead with Bart->MVP
453
+ class MvpClassificationHead(nn.Module):
454
+ """Head for sentence-level classification tasks."""
455
+
456
+ def __init__(
457
+ self,
458
+ input_dim: int,
459
+ inner_dim: int,
460
+ num_classes: int,
461
+ pooler_dropout: float,
462
+ ):
463
+ super().__init__()
464
+ self.dense = nn.Linear(input_dim, inner_dim)
465
+ self.dropout = nn.Dropout(p=pooler_dropout)
466
+ self.out_proj = nn.Linear(inner_dim, num_classes)
467
+
468
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
469
+ hidden_states = self.dropout(hidden_states)
470
+ hidden_states = self.dense(hidden_states)
471
+ hidden_states = torch.tanh(hidden_states)
472
+ hidden_states = self.dropout(hidden_states)
473
+ hidden_states = self.out_proj(hidden_states)
474
+ return hidden_states
475
+
476
+
477
+ class MvpPrompt(nn.Module):
478
+ """Layer-wise prompt for encoder or decoder."""
479
+
480
+ def __init__(self, config, num_layers, num_heads):
481
+ super().__init__()
482
+ self.prompt_length = config.prompt_length
483
+ self.num_layers = num_layers
484
+ self.num_heads = num_heads
485
+ self.head_dim = config.d_model // num_heads
486
+ self.dropout = nn.Dropout(p=config.dropout)
487
+ self.prompt_embedding = nn.Embedding(config.prompt_length, config.d_model)
488
+ self.prompt_trans = nn.Sequential(
489
+ nn.Linear(config.d_model, config.prompt_mid_dim),
490
+ nn.GELU(),
491
+ nn.Linear(config.prompt_mid_dim, num_layers * 2 * config.d_model),
492
+ )
493
+
494
+ def forward(self, prompt_ids: torch.Tensor) -> Tuple[torch.Tensor]:
495
+ prompt = self.prompt_trans(self.prompt_embedding(prompt_ids))
496
+ prompt = prompt.view(self.prompt_length, self.num_layers * 2, self.num_heads, self.head_dim)
497
+ prompt = self.dropout(prompt)
498
+ prompt = prompt.permute([1, 2, 0, 3]).split(2)
499
+ return prompt
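A small sketch of the tensor plumbing in `MvpPrompt.forward` above: the projected prompt is viewed as `(prompt_length, num_layers * 2, num_heads, head_dim)`, permuted, and split into one `(key, value)` stack per layer; a random tensor stands in for the output of `prompt_trans`:

```python
import torch

prompt_length, num_layers, num_heads, head_dim = 5, 3, 4, 8

prompt = torch.randn(prompt_length, num_layers * 2, num_heads, head_dim)
per_layer = prompt.permute([1, 2, 0, 3]).split(2)  # tuple with num_layers entries

print(len(per_layer))      # 3, one (key, value) stack per layer
print(per_layer[0].shape)  # torch.Size([2, 4, 5, 8]) == (2, num_heads, prompt_length, head_dim)
```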
500
+
501
+
502
+ class MvpPreTrainedModel(PreTrainedModel):
503
+ config_class = MvpConfig
504
+ base_model_prefix = "model"
505
+ supports_gradient_checkpointing = True
506
+
507
+ def _init_weights(self, module):
508
+ std = self.config.init_std
509
+ if isinstance(module, nn.Linear):
510
+ module.weight.data.normal_(mean=0.0, std=std)
511
+ if module.bias is not None:
512
+ module.bias.data.zero_()
513
+ elif isinstance(module, nn.Embedding):
514
+ module.weight.data.normal_(mean=0.0, std=std)
515
+ if module.padding_idx is not None:
516
+ module.weight.data[module.padding_idx].zero_()
517
+
518
+ @property
519
+ def dummy_inputs(self):
520
+ pad_token = self.config.pad_token_id
521
+ input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
522
+ dummy_inputs = {
523
+ "attention_mask": input_ids.ne(pad_token),
524
+ "input_ids": input_ids,
525
+ }
526
+ return dummy_inputs
527
+
528
+
529
+ MVP_START_DOCSTRING = r"""
530
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
531
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
532
+ etc.)
533
+
534
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
535
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
536
+ and behavior.
537
+
538
+ Parameters:
539
+ config ([`MvpConfig`]):
540
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
541
+ load the weights associated with the model, only the configuration. Check out the
542
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
543
+ """
544
+
545
+ MVP_INPUTS_DOCSTRING = r"""
546
+ Args:
547
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
548
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
549
+ it.
550
+
551
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
552
+ [`PreTrainedTokenizer.__call__`] for details.
553
+
554
+ [What are input IDs?](../glossary#input-ids)
555
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
556
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
557
+
558
+ - 1 for tokens that are **not masked**,
559
+ - 0 for tokens that are **masked**.
560
+
561
+ [What are attention masks?](../glossary#attention-mask)
562
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
563
+ Indices of decoder input sequence tokens in the vocabulary.
564
+
565
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
566
+ [`PreTrainedTokenizer.__call__`] for details.
567
+
568
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
569
+
570
+ Mvp uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
571
+ is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
572
+
573
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
574
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
575
+ for denoising pre-training following the paper.
576
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
577
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
578
+ be used by default.
579
+
580
+ If you want to change padding behavior, you should read [`modeling_mvp._prepare_decoder_attention_mask`]
581
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
582
+ information on the default strategy.
583
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
584
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
585
+
586
+ - 1 indicates the head is **not masked**,
587
+ - 0 indicates the head is **masked**.
588
+
589
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
590
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
591
+
592
+ - 1 indicates the head is **not masked**,
593
+ - 0 indicates the head is **masked**.
594
+
595
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
596
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
597
+ 1]`:
598
+
599
+ - 1 indicates the head is **not masked**,
600
+ - 0 indicates the head is **masked**.
601
+
602
+ encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
603
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
604
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
605
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
606
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
607
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
608
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
609
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
610
+
611
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
612
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
613
+
614
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
615
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
616
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
617
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
618
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
619
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
620
+ than the model's internal embedding lookup matrix.
621
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
622
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
623
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
624
+ input (see `past_key_values`). This is useful if you want more control over how to convert
625
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
626
+
627
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
628
+ of `inputs_embeds`.
629
+ use_cache (`bool`, *optional*):
630
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
631
+ `past_key_values`).
632
+ output_attentions (`bool`, *optional*):
633
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
634
+ tensors for more detail.
635
+ output_hidden_states (`bool`, *optional*):
636
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
637
+ more detail.
638
+ return_dict (`bool`, *optional*):
639
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
640
+ """
641
+
642
+ MVP_CONDITIONAL_GENERATION_EXAMPLE = r"""
643
+ Example of summarization:
644
+
645
+ Fine-tuning a model
646
+ ```python
647
+ >>> import torch
648
+ >>> from transformers import AutoTokenizer, MvpForConditionalGeneration
649
+
650
+ >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp")
651
+ >>> model = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mvp")
652
+
653
+ >>> inputs = tokenizer(
654
+ ... "Summarize: You may want to stick it to your boss and leave your job, but don't do it if these are your reasons.",
655
+ ... return_tensors="pt",
656
+ ... )
657
+ >>> labels = tokenizer("Bad Reasons To Quit Your Job", return_tensors="pt")["input_ids"]
658
+
659
+ >>> loss = model(**inputs, labels=labels).loss
660
+ >>> loss.backward()
661
+ ```
662
+
663
+ Inference after fine-tuning the model
664
+ ```python
665
+ >>> with torch.no_grad():
666
+ ... generated_ids = model.generate(**inputs)
667
+
668
+ >>> generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
669
+ ```
670
+ """
671
+
672
+ MVP_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
673
+ Example of single-label classification:
674
+
675
+ Fine-tuning a model on `num_labels` classes
676
+ ```python
677
+ >>> import torch
678
+ >>> from transformers import AutoTokenizer, MvpForSequenceClassification
679
+
680
+ >>> num_labels = 2 # for example, this is a binary classification task
681
+ >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp")
682
+ >>> model = MvpForSequenceClassification.from_pretrained("RUCAIBox/mvp", num_labels=num_labels)
683
+
684
+ >>> inputs = tokenizer("Classify: Hello, my dog is cute", return_tensors="pt")
685
+ >>> labels = torch.tensor(1) # the real label for inputs
686
+
687
+ >>> loss = model(**inputs, labels=labels).loss
688
+ >>> loss.backward()
689
+ ```
690
+
691
+ Inference after fine-tuning the model
692
+ ```python
693
+ >>> with torch.no_grad():
694
+ ... logits = model(**inputs).logits
695
+
696
+ >>> predicted_class_id = logits.argmax()
697
+ ```
698
+ """
699
+
700
+ MVP_QUESTION_ANSWERING_SAMPLE = r"""
701
+ Example:
702
+
703
+ Fine-tuning a model for extractive question answering; the model also supports generative question answering
704
+ using `MvpForConditionalGeneration`
705
+ ```python
706
+ >>> import torch
707
+ >>> from transformers import AutoTokenizer, MvpForQuestionAnswering
708
+
709
+ >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp")
710
+ >>> model = MvpForQuestionAnswering.from_pretrained("RUCAIBox/mvp")
711
+
712
+ >>> inputs = tokenizer(
713
+ ... "Answer the following question: Who was Jim Henson? [SEP] Jim Henson was a nice puppet",
714
+ ... return_tensors="pt",
715
+ ... )
716
+ >>> target_start_index = torch.tensor([18])
717
+ >>> target_end_index = torch.tensor([19])
718
+
719
+ >>> loss = model(**inputs, start_positions=target_start_index, end_positions=target_end_index).loss
720
+ >>> loss.backward()
721
+ ```
722
+
723
+ Inference after fine-tuning the model
724
+ ```python
725
+ >>> with torch.no_grad():
726
+ ... outputs = model(**inputs)
727
+
728
+ >>> answer_start_index = outputs.start_logits.argmax()
729
+ >>> answer_end_index = outputs.end_logits.argmax()
730
+
731
+ >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
732
+ >>> predict_answer = tokenizer.decode(predict_answer_tokens)
733
+ ```
734
+ """
735
+
736
+
737
+ class MvpEncoder(MvpPreTrainedModel):
738
+ """
739
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
740
+ [`MvpEncoderLayer`].
741
+
742
+ Args:
743
+ config: MvpConfig
744
+ embed_tokens (nn.Embedding): output embedding
745
+ use_prompt (bool): whether to use prompt
746
+ """
747
+
748
+ def __init__(
749
+ self, config: MvpConfig, embed_tokens: Optional[nn.Embedding] = None, use_prompt: Optional[bool] = False
750
+ ):
751
+ super().__init__(config)
752
+
753
+ self.dropout = config.dropout
754
+ self.layerdrop = config.encoder_layerdrop
755
+
756
+ embed_dim = config.d_model
757
+ self.padding_idx = config.pad_token_id
758
+ self.max_source_positions = config.max_position_embeddings
759
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
760
+
761
+ if embed_tokens is not None:
762
+ self.embed_tokens = embed_tokens
763
+ else:
764
+ self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
765
+
766
+ self.embed_positions = MvpLearnedPositionalEmbedding(
767
+ config.max_position_embeddings,
768
+ embed_dim,
769
+ )
770
+ self.layers = nn.ModuleList([MvpEncoderLayer(config) for _ in range(config.encoder_layers)])
771
+ self.layernorm_embedding = nn.LayerNorm(embed_dim)
772
+
773
+ self.use_prompt = use_prompt
774
+ if use_prompt:
775
+ self.prompt_length = config.prompt_length
776
+ self.self_attn_prompt = MvpPrompt(
777
+ config,
778
+ config.encoder_layers,
779
+ config.encoder_attention_heads,
780
+ )
781
+
782
+ self.gradient_checkpointing = False
783
+ # Initialize weights and apply final processing
784
+ self.post_init()
785
+
786
+ def get_input_embeddings(self):
787
+ return self.embed_tokens
788
+
789
+ def set_input_embeddings(self, value):
790
+ self.embed_tokens = value
791
+
792
+ def forward(
793
+ self,
794
+ input_ids: torch.LongTensor = None,
795
+ attention_mask: Optional[torch.Tensor] = None,
796
+ head_mask: Optional[torch.Tensor] = None,
797
+ inputs_embeds: Optional[torch.FloatTensor] = None,
798
+ output_attentions: Optional[bool] = None,
799
+ output_hidden_states: Optional[bool] = None,
800
+ return_dict: Optional[bool] = None,
801
+ ) -> Union[Tuple, BaseModelOutput]:
802
+ r"""
803
+ Args:
804
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
805
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
806
+ provide it.
807
+
808
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
809
+ [`PreTrainedTokenizer.__call__`] for details.
810
+
811
+ [What are input IDs?](../glossary#input-ids)
812
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
813
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
814
+
815
+ - 1 for tokens that are **not masked**,
816
+ - 0 for tokens that are **masked**.
817
+
818
+ [What are attention masks?](../glossary#attention-mask)
819
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
820
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
821
+
822
+ - 1 indicates the head is **not masked**,
823
+ - 0 indicates the head is **masked**.
824
+
825
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
826
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
827
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
828
+ than the model's internal embedding lookup matrix.
829
+ output_attentions (`bool`, *optional*):
830
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
831
+ returned tensors for more detail.
832
+ output_hidden_states (`bool`, *optional*):
833
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
834
+ for more detail.
835
+ return_dict (`bool`, *optional*):
836
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
837
+ """
838
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
839
+ output_hidden_states = (
840
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
841
+ )
842
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
843
+
844
+ # retrieve input_ids and inputs_embeds
845
+ if input_ids is not None and inputs_embeds is not None:
846
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
847
+ elif input_ids is not None:
848
+ input = input_ids
849
+ input_shape = input.shape
850
+ input_ids = input_ids.view(-1, input_shape[-1])
851
+ elif inputs_embeds is not None:
852
+ input_shape = inputs_embeds.size()[:-1]
853
+ input = inputs_embeds[:, :, -1]
854
+ else:
855
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
856
+
857
+ if inputs_embeds is None:
858
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
859
+
860
+ embed_pos = self.embed_positions(input)
861
+
862
+ hidden_states = inputs_embeds + embed_pos
863
+ hidden_states = self.layernorm_embedding(hidden_states)
864
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
865
+
866
+ # layer-wise prompt
867
+ if self.use_prompt:
868
+ prompt_ids = torch.arange(self.prompt_length).to(self.device)
869
+ self_attn_prompt = self.self_attn_prompt(prompt_ids)
870
+
871
+ # expand attention_mask
872
+ if attention_mask is not None:
873
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
874
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
875
+
876
+ encoder_states = () if output_hidden_states else None
877
+ all_attentions = () if output_attentions else None
878
+
879
+ # check if head_mask has a correct number of layers specified if desired
880
+ if head_mask is not None:
881
+ if head_mask.size()[0] != (len(self.layers)):
882
+ raise ValueError(
883
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
884
+ f" {head_mask.size()[0]}."
885
+ )
886
+
887
+ for idx, encoder_layer in enumerate(self.layers):
888
+ if output_hidden_states:
889
+ encoder_states = encoder_states + (hidden_states,)
890
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
891
+ to_drop = False
892
+ if self.training:
893
+ dropout_probability = torch.rand([])
894
+ if dropout_probability < self.layerdrop: # skip the layer
895
+ to_drop = True
896
+
897
+ if to_drop:
898
+ layer_outputs = (None, None)
899
+ else:
900
+ if self.gradient_checkpointing and self.training:
901
+ layer_outputs = self._gradient_checkpointing_func(
902
+ encoder_layer.__call__,
903
+ hidden_states,
904
+ attention_mask,
905
+ (head_mask[idx] if head_mask is not None else None),
906
+ (self_attn_prompt[idx] if self.use_prompt else None),
907
+ output_attentions,
908
+ )
909
+ else:
910
+ layer_outputs = encoder_layer(
911
+ hidden_states,
912
+ attention_mask,
913
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
914
+ self_attn_prompt=(self_attn_prompt[idx] if self.use_prompt else None),
915
+ output_attentions=output_attentions,
916
+ )
917
+
918
+ hidden_states = layer_outputs[0]
919
+
920
+ if output_attentions:
921
+ all_attentions = all_attentions + (layer_outputs[1],)
922
+
923
+ if output_hidden_states:
924
+ encoder_states = encoder_states + (hidden_states,)
925
+
926
+ if not return_dict:
927
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
928
+ return BaseModelOutput(
929
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
930
+ )
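+
+ # A minimal usage sketch of the standalone encoder above (a sketch only; it assumes a
+ # default, randomly initialised `MvpConfig` and is just meant to show the expected shapes):
+ #
+ #     import torch
+ #     from transformers import MvpConfig
+ #
+ #     config = MvpConfig()
+ #     encoder = MvpEncoder(config, use_prompt=True)  # `use_prompt=True` enables the layer-wise prompts
+ #     out = encoder(input_ids=torch.tensor([[0, 31414, 232, 2]]))
+ #     out.last_hidden_state.shape  # torch.Size([1, 4, 1024]) with the default d_model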
931
+
932
+
933
+ class MvpDecoder(MvpPreTrainedModel):
934
+ """
935
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`MvpDecoderLayer`]
936
+
937
+ Args:
938
+ config: MvpConfig
939
+ embed_tokens (nn.Embedding): output embedding
940
+ use_prompt (bool): whether to use prompt
941
+ """
942
+
943
+ def __init__(
944
+ self, config: MvpConfig, embed_tokens: Optional[nn.Embedding] = None, use_prompt: Optional[bool] = False
945
+ ):
946
+ super().__init__(config)
947
+ self.dropout = config.dropout
948
+ self.layerdrop = config.decoder_layerdrop
949
+ self.padding_idx = config.pad_token_id
950
+ self.max_target_positions = config.max_position_embeddings
951
+ self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
952
+
953
+ if embed_tokens is not None:
954
+ self.embed_tokens = embed_tokens
955
+ else:
956
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
957
+
958
+ self.embed_positions = MvpLearnedPositionalEmbedding(
959
+ config.max_position_embeddings,
960
+ config.d_model,
961
+ )
962
+ self.layers = nn.ModuleList([MvpDecoderLayer(config) for _ in range(config.decoder_layers)])
963
+ self.layernorm_embedding = nn.LayerNorm(config.d_model)
964
+
965
+ self.use_prompt = use_prompt
966
+ if use_prompt:
967
+ self.prompt_length = config.prompt_length
968
+ self.self_attn_prompt = MvpPrompt(
969
+ config,
970
+ config.decoder_layers,
971
+ config.decoder_attention_heads,
972
+ )
973
+ self.cross_attn_prompt = MvpPrompt(
974
+ config,
975
+ config.decoder_layers,
976
+ config.decoder_attention_heads,
977
+ )
978
+
979
+ self.gradient_checkpointing = False
980
+ # Initialize weights and apply final processing
981
+ self.post_init()
982
+
983
+ def get_input_embeddings(self):
984
+ return self.embed_tokens
985
+
986
+ def set_input_embeddings(self, value):
987
+ self.embed_tokens = value
988
+
989
+ def forward(
990
+ self,
991
+ input_ids: torch.LongTensor = None,
992
+ attention_mask: Optional[torch.Tensor] = None,
993
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
994
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
995
+ head_mask: Optional[torch.Tensor] = None,
996
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
997
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
998
+ inputs_embeds: Optional[torch.FloatTensor] = None,
999
+ use_cache: Optional[bool] = None,
1000
+ output_attentions: Optional[bool] = None,
1001
+ output_hidden_states: Optional[bool] = None,
1002
+ return_dict: Optional[bool] = None,
1003
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
1004
+ r"""
1005
+ Args:
1006
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1007
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
1008
+ provide it.
1009
+
1010
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1011
+ [`PreTrainedTokenizer.__call__`] for details.
1012
+
1013
+ [What are input IDs?](../glossary#input-ids)
1014
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1015
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1016
+
1017
+ - 1 for tokens that are **not masked**,
1018
+ - 0 for tokens that are **masked**.
1019
+
1020
+ [What are attention masks?](../glossary#attention-mask)
1021
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
1022
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
1023
+ of the decoder.
1024
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
1025
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
1026
+ selected in `[0, 1]`:
1027
+
1028
+ - 1 for tokens that are **not masked**,
1029
+ - 0 for tokens that are **masked**.
1030
+
1031
+ [What are attention masks?](../glossary#attention-mask)
1032
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
1033
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
1034
+
1035
+ - 1 indicates the head is **not masked**,
1036
+ - 0 indicates the head is **masked**.
1037
+
1038
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
1039
+ Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
1040
+ cross-attention on hidden heads. Mask values selected in `[0, 1]`:
1041
+
1042
+ - 1 indicates the head is **not masked**,
1043
+ - 0 indicates the head is **masked**.
1044
+
1045
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
1046
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1047
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
1048
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
1049
+
1050
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
1051
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
1052
+
1053
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
1054
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
1055
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1056
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1057
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
1058
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
1059
+ than the model's internal embedding lookup matrix.
1060
+ output_attentions (`bool`, *optional*):
1061
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1062
+ returned tensors for more detail.
1063
+ output_hidden_states (`bool`, *optional*):
1064
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
1065
+ for more detail.
1066
+ return_dict (`bool`, *optional*):
1067
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1068
+ """
1069
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1070
+ output_hidden_states = (
1071
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1072
+ )
1073
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1074
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1075
+
1076
+ # retrieve input_ids and inputs_embeds
1077
+ if input_ids is not None and inputs_embeds is not None:
1078
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
1079
+ elif input_ids is not None:
1080
+ input = input_ids
1081
+ input_shape = input_ids.shape
1082
+ input_ids = input_ids.view(-1, input_shape[-1])
1083
+ elif inputs_embeds is not None:
1084
+ input_shape = inputs_embeds.size()[:-1]
1085
+ input = inputs_embeds[:, :, -1]
1086
+ else:
1087
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
1088
+
1089
+ # past_key_values_length
1090
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
1091
+
1092
+ if inputs_embeds is None:
1093
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
1094
+
1095
+ attention_mask = _prepare_4d_causal_attention_mask(
1096
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
1097
+ )
1098
+
1099
+ # expand encoder attention mask
1100
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
1101
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1102
+ encoder_attention_mask = _prepare_4d_attention_mask(
1103
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
1104
+ )
1105
+
1106
+ # embed positions
1107
+ positions = self.embed_positions(input, past_key_values_length)
1108
+
1109
+ hidden_states = inputs_embeds + positions
1110
+ hidden_states = self.layernorm_embedding(hidden_states)
1111
+
1112
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
1113
+
1114
+ # layer-wise prompt
1115
+ if self.use_prompt:
1116
+ prompt_ids = torch.arange(self.prompt_length).to(self.device)
1117
+ self_attn_prompt = self.self_attn_prompt(prompt_ids)
1118
+ cross_attn_prompt = self.cross_attn_prompt(prompt_ids)
1119
+
1120
+ if self.gradient_checkpointing and self.training:
1121
+ if use_cache:
1122
+ logger.warning_once(
1123
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1124
+ )
1125
+ use_cache = False
1126
+
1127
+ # decoder layers
1128
+ all_hidden_states = () if output_hidden_states else None
1129
+ all_self_attns = () if output_attentions else None
1130
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
1131
+ next_decoder_cache = () if use_cache else None
1132
+
1133
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
1134
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
1135
+ if attn_mask is not None:
1136
+ if attn_mask.size()[0] != (len(self.layers)):
1137
+ raise ValueError(
1138
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
1139
+ f" {attn_mask.size()[0]}."
1140
+ )
1141
+
1142
+ for idx, decoder_layer in enumerate(self.layers):
1143
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1144
+ if output_hidden_states:
1145
+ all_hidden_states += (hidden_states,)
1146
+ if self.training:
1147
+ dropout_probability = torch.rand([])
1148
+ if dropout_probability < self.layerdrop:
1149
+ continue
1150
+
1151
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
1152
+
1153
+ if self.gradient_checkpointing and self.training:
1154
+ layer_outputs = self._gradient_checkpointing_func(
1155
+ decoder_layer.__call__,
1156
+ hidden_states,
1157
+ attention_mask,
1158
+ encoder_hidden_states,
1159
+ encoder_attention_mask,
1160
+ head_mask[idx] if head_mask is not None else None,
1161
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
1162
+ self_attn_prompt[idx] if self.use_prompt else None,
1163
+ cross_attn_prompt[idx] if self.use_prompt else None,
1164
+ None,
1165
+ output_attentions,
1166
+ use_cache,
1167
+ )
1168
+ else:
1169
+ layer_outputs = decoder_layer(
1170
+ hidden_states,
1171
+ attention_mask=attention_mask,
1172
+ encoder_hidden_states=encoder_hidden_states,
1173
+ encoder_attention_mask=encoder_attention_mask,
1174
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
1175
+ cross_attn_layer_head_mask=(
1176
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
1177
+ ),
1178
+ self_attn_prompt=(self_attn_prompt[idx] if self.use_prompt else None),
1179
+ cross_attn_prompt=(cross_attn_prompt[idx] if self.use_prompt else None),
1180
+ past_key_value=past_key_value,
1181
+ output_attentions=output_attentions,
1182
+ use_cache=use_cache,
1183
+ )
1184
+ hidden_states = layer_outputs[0]
1185
+
1186
+ if use_cache:
1187
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
1188
+
1189
+ if output_attentions:
1190
+ all_self_attns += (layer_outputs[1],)
1191
+
1192
+ if encoder_hidden_states is not None:
1193
+ all_cross_attentions += (layer_outputs[2],)
1194
+
1195
+ # add hidden states from the last decoder layer
1196
+ if output_hidden_states:
1197
+ all_hidden_states += (hidden_states,)
1198
+
1199
+ next_cache = next_decoder_cache if use_cache else None
1200
+ if not return_dict:
1201
+ return tuple(
1202
+ v
1203
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
1204
+ if v is not None
1205
+ )
1206
+ return BaseModelOutputWithPastAndCrossAttentions(
1207
+ last_hidden_state=hidden_states,
1208
+ past_key_values=next_cache,
1209
+ hidden_states=all_hidden_states,
1210
+ attentions=all_self_attns,
1211
+ cross_attentions=all_cross_attentions,
1212
+ )
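+
+ # A rough sketch of incremental decoding with the decoder above, assuming a default
+ # `MvpConfig`; each entry of `past_key_values` holds the cached self-attention (and, when
+ # encoder states are passed, cross-attention) key/value tensors described in the docstring:
+ #
+ #     import torch
+ #     from transformers import MvpConfig
+ #
+ #     decoder = MvpDecoder(MvpConfig())
+ #     step1 = decoder(input_ids=torch.tensor([[2, 0]]), use_cache=True)
+ #     # on the next step only the newest token is fed together with the cached states
+ #     step2 = decoder(
+ #         input_ids=torch.tensor([[31414]]),
+ #         past_key_values=step1.past_key_values,
+ #         use_cache=True,
+ #     )
+ #     step1.past_key_values[0][0].shape  # e.g. (1, 16, 2, 64): (batch, heads, seq_len, head_dim)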
1213
+
1214
+
1215
+ @add_start_docstrings(
1216
+ "The bare MVP Model outputting raw hidden-states without any specific head on top.",
1217
+ MVP_START_DOCSTRING,
1218
+ )
1219
+ class MvpModel(MvpPreTrainedModel):
1220
+ _keys_to_ignore_on_load_unexpected = ["final_logits_bias"]
1221
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
1222
+
1223
+ def __init__(self, config: MvpConfig):
1224
+ super().__init__(config)
1225
+
1226
+ padding_idx, vocab_size = config.pad_token_id, config.vocab_size
1227
+ self.use_prompt = config.use_prompt
1228
+ self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
1229
+
1230
+ self.encoder = MvpEncoder(config, self.shared, config.use_prompt)
1231
+ self.decoder = MvpDecoder(config, self.shared, config.use_prompt)
1232
+
1233
+ # Initialize weights and apply final processing
1234
+ self.post_init()
1235
+
1236
+ def get_input_embeddings(self):
1237
+ return self.shared
1238
+
1239
+ def set_input_embeddings(self, value):
1240
+ self.shared = value
1241
+ self.encoder.embed_tokens = self.shared
1242
+ self.decoder.embed_tokens = self.shared
1243
+
1244
+ def get_encoder(self):
1245
+ return self.encoder
1246
+
1247
+ def get_decoder(self):
1248
+ return self.decoder
1249
+
1250
+ def set_lightweight_tuning(self):
1251
+ assert self.use_prompt, "If you want to use lightweight tuning, make sure that `use_prompt=True`."
1252
+
1253
+ self.requires_grad_(False)
1254
+ self.encoder.self_attn_prompt.requires_grad_(True)
1255
+ self.decoder.self_attn_prompt.requires_grad_(True)
1256
+ self.decoder.cross_attn_prompt.requires_grad_(True)
1257
+
1258
+ @add_start_docstrings_to_model_forward(MVP_INPUTS_DOCSTRING)
1259
+ @add_code_sample_docstrings(
1260
+ checkpoint=_CHECKPOINT_FOR_DOC,
1261
+ output_type=Seq2SeqModelOutput,
1262
+ config_class=_CONFIG_FOR_DOC,
1263
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
1264
+ )
1265
+ def forward(
1266
+ self,
1267
+ input_ids: torch.LongTensor = None,
1268
+ attention_mask: Optional[torch.Tensor] = None,
1269
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1270
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1271
+ head_mask: Optional[torch.Tensor] = None,
1272
+ decoder_head_mask: Optional[torch.Tensor] = None,
1273
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1274
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
1275
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1276
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1277
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1278
+ use_cache: Optional[bool] = None,
1279
+ output_attentions: Optional[bool] = None,
1280
+ output_hidden_states: Optional[bool] = None,
1281
+ return_dict: Optional[bool] = None,
1282
+ ) -> Union[Tuple, Seq2SeqModelOutput]:
1283
+ # Unlike other models, Mvp automatically creates decoder_input_ids from
+ # input_ids if no decoder_input_ids are provided
1285
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1286
+ if input_ids is None:
1287
+ raise ValueError(
1288
+ "If no `decoder_input_ids` or `decoder_inputs_embeds` are "
1289
+ "passed, `input_ids` cannot be `None`. Please pass either "
1290
+ "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`."
1291
+ )
1292
+
1293
+ decoder_input_ids = shift_tokens_right(
1294
+ input_ids, self.config.pad_token_id, self.config.decoder_start_token_id
1295
+ )
1296
+
1297
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1298
+ output_hidden_states = (
1299
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1300
+ )
1301
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1302
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1303
+
1304
+ if encoder_outputs is None:
1305
+ encoder_outputs = self.encoder(
1306
+ input_ids=input_ids,
1307
+ attention_mask=attention_mask,
1308
+ head_mask=head_mask,
1309
+ inputs_embeds=inputs_embeds,
1310
+ output_attentions=output_attentions,
1311
+ output_hidden_states=output_hidden_states,
1312
+ return_dict=return_dict,
1313
+ )
1314
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
1315
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
1316
+ encoder_outputs = BaseModelOutput(
1317
+ last_hidden_state=encoder_outputs[0],
1318
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1319
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1320
+ )
1321
+
1322
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
1323
+ decoder_outputs = self.decoder(
1324
+ input_ids=decoder_input_ids,
1325
+ attention_mask=decoder_attention_mask,
1326
+ encoder_hidden_states=encoder_outputs[0],
1327
+ encoder_attention_mask=attention_mask,
1328
+ head_mask=decoder_head_mask,
1329
+ cross_attn_head_mask=cross_attn_head_mask,
1330
+ past_key_values=past_key_values,
1331
+ inputs_embeds=decoder_inputs_embeds,
1332
+ use_cache=use_cache,
1333
+ output_attentions=output_attentions,
1334
+ output_hidden_states=output_hidden_states,
1335
+ return_dict=return_dict,
1336
+ )
1337
+
1338
+ if not return_dict:
1339
+ return decoder_outputs + encoder_outputs
1340
+
1341
+ return Seq2SeqModelOutput(
1342
+ last_hidden_state=decoder_outputs.last_hidden_state,
1343
+ past_key_values=decoder_outputs.past_key_values,
1344
+ decoder_hidden_states=decoder_outputs.hidden_states,
1345
+ decoder_attentions=decoder_outputs.attentions,
1346
+ cross_attentions=decoder_outputs.cross_attentions,
1347
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1348
+ encoder_hidden_states=encoder_outputs.hidden_states,
1349
+ encoder_attentions=encoder_outputs.attentions,
1350
+ )
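+
+ # A minimal sketch of running the full encoder-decoder model above; as noted in `forward`,
+ # `decoder_input_ids` are derived from `input_ids` when they are not supplied. The
+ # "RUCAIBox/mvp" checkpoint referenced elsewhere in this file is assumed to be available:
+ #
+ #     from transformers import AutoTokenizer, MvpModel
+ #
+ #     tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp")
+ #     model = MvpModel.from_pretrained("RUCAIBox/mvp")
+ #     inputs = tokenizer("Summarize: the weather is nice today.", return_tensors="pt")
+ #     outputs = model(**inputs)  # decoder_input_ids are created by shifting input_ids
+ #     outputs.last_hidden_state.shape  # (batch_size, sequence_length, d_model)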
1351
+
1352
+
1353
+ @add_start_docstrings(
1354
+ "The MVP Model with a language modeling head. Can be used for various text generation tasks.", MVP_START_DOCSTRING
1355
+ )
1356
+ class MvpForConditionalGeneration(MvpPreTrainedModel):
1357
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]
1358
+
1359
+ def __init__(self, config: MvpConfig):
1360
+ super().__init__(config)
1361
+ self.model = MvpModel(config)
1362
+ self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
1363
+ self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
1364
+
1365
+ # Initialize weights and apply final processing
1366
+ self.post_init()
1367
+
1368
+ def get_encoder(self):
1369
+ return self.model.get_encoder()
1370
+
1371
+ def get_decoder(self):
1372
+ return self.model.get_decoder()
1373
+
1374
+ def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
1375
+ new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
1376
+ self._resize_final_logits_bias(new_num_tokens)
1377
+ return new_embeddings
1378
+
1379
+ def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
1380
+ old_num_tokens = self.final_logits_bias.shape[-1]
1381
+ if new_num_tokens <= old_num_tokens:
1382
+ new_bias = self.final_logits_bias[:, :new_num_tokens]
1383
+ else:
1384
+ extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
1385
+ new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
1386
+ self.register_buffer("final_logits_bias", new_bias)
1387
+
1388
+ def get_output_embeddings(self):
1389
+ return self.lm_head
1390
+
1391
+ def set_output_embeddings(self, new_embeddings):
1392
+ self.lm_head = new_embeddings
1393
+
1394
+ def set_lightweight_tuning(self):
1395
+ self.model.set_lightweight_tuning()
1396
+ self.lm_head.requires_grad_(False)
1397
+
1398
+ @add_start_docstrings_to_model_forward(MVP_INPUTS_DOCSTRING)
1399
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
1400
+ @add_end_docstrings(MVP_CONDITIONAL_GENERATION_EXAMPLE)
1401
+ def forward(
1402
+ self,
1403
+ input_ids: torch.LongTensor = None,
1404
+ attention_mask: Optional[torch.Tensor] = None,
1405
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1406
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1407
+ head_mask: Optional[torch.Tensor] = None,
1408
+ decoder_head_mask: Optional[torch.Tensor] = None,
1409
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1410
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
1411
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1412
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1413
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1414
+ labels: Optional[torch.LongTensor] = None,
1415
+ use_cache: Optional[bool] = None,
1416
+ output_attentions: Optional[bool] = None,
1417
+ output_hidden_states: Optional[bool] = None,
1418
+ return_dict: Optional[bool] = None,
1419
+ ) -> Union[Tuple, Seq2SeqLMOutput]:
1420
+ r"""
1421
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1422
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1423
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1424
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1425
+
1426
+ Returns:
1427
+ """
1428
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1429
+
1430
+ if labels is not None:
1431
+ if use_cache:
1432
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
1433
+ use_cache = False
1434
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1435
+ decoder_input_ids = shift_tokens_right(
1436
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
1437
+ )
1438
+
1439
+ outputs = self.model(
1440
+ input_ids,
1441
+ attention_mask=attention_mask,
1442
+ decoder_input_ids=decoder_input_ids,
1443
+ encoder_outputs=encoder_outputs,
1444
+ decoder_attention_mask=decoder_attention_mask,
1445
+ head_mask=head_mask,
1446
+ decoder_head_mask=decoder_head_mask,
1447
+ cross_attn_head_mask=cross_attn_head_mask,
1448
+ past_key_values=past_key_values,
1449
+ inputs_embeds=inputs_embeds,
1450
+ decoder_inputs_embeds=decoder_inputs_embeds,
1451
+ use_cache=use_cache,
1452
+ output_attentions=output_attentions,
1453
+ output_hidden_states=output_hidden_states,
1454
+ return_dict=return_dict,
1455
+ )
1456
+ lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
1457
+
1458
+ masked_lm_loss = None
1459
+ if labels is not None:
1460
+ loss_fct = CrossEntropyLoss()
1461
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
1462
+
1463
+ if not return_dict:
1464
+ output = (lm_logits,) + outputs[1:]
1465
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1466
+
1467
+ return Seq2SeqLMOutput(
1468
+ loss=masked_lm_loss,
1469
+ logits=lm_logits,
1470
+ past_key_values=outputs.past_key_values,
1471
+ decoder_hidden_states=outputs.decoder_hidden_states,
1472
+ decoder_attentions=outputs.decoder_attentions,
1473
+ cross_attentions=outputs.cross_attentions,
1474
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1475
+ encoder_hidden_states=outputs.encoder_hidden_states,
1476
+ encoder_attentions=outputs.encoder_attentions,
1477
+ )
1478
+
1479
+ def prepare_inputs_for_generation(
1480
+ self,
1481
+ decoder_input_ids,
1482
+ past_key_values=None,
1483
+ attention_mask=None,
1484
+ head_mask=None,
1485
+ decoder_head_mask=None,
1486
+ cross_attn_head_mask=None,
1487
+ use_cache=None,
1488
+ encoder_outputs=None,
1489
+ **kwargs,
1490
+ ):
1491
+ # cut decoder_input_ids if past is used
1492
+ if past_key_values is not None:
1493
+ past_length = past_key_values[0][0].shape[2]
1494
+
1495
+ # Some generation methods already pass only the last input ID
1496
+ if decoder_input_ids.shape[1] > past_length:
1497
+ remove_prefix_length = past_length
1498
+ else:
1499
+ # Default to old behavior: keep only final ID
1500
+ remove_prefix_length = decoder_input_ids.shape[1] - 1
1501
+
1502
+ decoder_input_ids = decoder_input_ids[:, remove_prefix_length:]
1503
+
1504
+ return {
1505
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
1506
+ "encoder_outputs": encoder_outputs,
1507
+ "past_key_values": past_key_values,
1508
+ "decoder_input_ids": decoder_input_ids,
1509
+ "attention_mask": attention_mask,
1510
+ "head_mask": head_mask,
1511
+ "decoder_head_mask": decoder_head_mask,
1512
+ "cross_attn_head_mask": cross_attn_head_mask,
1513
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
1514
+ }
1515
+
1516
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
1517
+ return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
1518
+
1519
+ @staticmethod
1520
+ def _reorder_cache(past_key_values, beam_idx):
1521
+ reordered_past = ()
1522
+ for layer_past in past_key_values:
1523
+ # cached cross_attention states don't have to be reordered -> they are always the same
1524
+ reordered_past += (
1525
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
1526
+ + layer_past[2:],
1527
+ )
1528
+ return reordered_past
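+
+ # A sketch of the lightweight (prompt) tuning path wired up by `set_lightweight_tuning`,
+ # assuming a config built with `use_prompt=True`; after the call, only the prompt modules
+ # keep `requires_grad=True`:
+ #
+ #     from transformers import MvpConfig, MvpForConditionalGeneration
+ #
+ #     model = MvpForConditionalGeneration(MvpConfig(use_prompt=True))
+ #     model.set_lightweight_tuning()
+ #     trainable = [n for n, p in model.named_parameters() if p.requires_grad]
+ #     # only `*.self_attn_prompt.*` and `*.cross_attn_prompt.*` parameters remain trainable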
1529
+
1530
+
1531
+ @add_start_docstrings(
1532
+ """
1533
+ Mvp model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for GLUE
+ tasks.
1535
+ """,
1536
+ MVP_START_DOCSTRING,
1537
+ )
1538
+ class MvpForSequenceClassification(MvpPreTrainedModel):
1539
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
1540
+
1541
+ def __init__(self, config: MvpConfig, **kwargs):
1542
+ super().__init__(config, **kwargs)
1543
+ self.model = MvpModel(config)
1544
+ self.classification_head = MvpClassificationHead(
1545
+ config.d_model,
1546
+ config.d_model,
1547
+ config.num_labels,
1548
+ config.classifier_dropout,
1549
+ )
1550
+
1551
+ # Initialize weights and apply final processing
1552
+ self.post_init()
1553
+
1554
+ def set_lightweight_tuning(self):
1555
+ self.model.set_lightweight_tuning()
1556
+ self.classification_head.requires_grad_(False)
1557
+
1558
+ @add_start_docstrings_to_model_forward(MVP_INPUTS_DOCSTRING)
1559
+ @add_end_docstrings(MVP_SEQUENCE_CLASSIFICATION_SAMPLE)
1560
+ def forward(
1561
+ self,
1562
+ input_ids: torch.LongTensor = None,
1563
+ attention_mask: Optional[torch.Tensor] = None,
1564
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1565
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1566
+ head_mask: Optional[torch.Tensor] = None,
1567
+ decoder_head_mask: Optional[torch.Tensor] = None,
1568
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1569
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
1570
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1571
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1572
+ labels: Optional[torch.LongTensor] = None,
1573
+ use_cache: Optional[bool] = None,
1574
+ output_attentions: Optional[bool] = None,
1575
+ output_hidden_states: Optional[bool] = None,
1576
+ return_dict: Optional[bool] = None,
1577
+ ) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]:
1578
+ r"""
1579
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1580
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1581
+ config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1582
+ """
1583
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1584
+ if labels is not None:
1585
+ use_cache = False
1586
+
1587
+ if input_ids is None and inputs_embeds is not None:
1588
+ raise NotImplementedError(
1589
+ f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
1590
+ )
1591
+
1592
+ outputs = self.model(
1593
+ input_ids,
1594
+ attention_mask=attention_mask,
1595
+ decoder_input_ids=decoder_input_ids,
1596
+ decoder_attention_mask=decoder_attention_mask,
1597
+ head_mask=head_mask,
1598
+ decoder_head_mask=decoder_head_mask,
1599
+ cross_attn_head_mask=cross_attn_head_mask,
1600
+ encoder_outputs=encoder_outputs,
1601
+ inputs_embeds=inputs_embeds,
1602
+ decoder_inputs_embeds=decoder_inputs_embeds,
1603
+ use_cache=use_cache,
1604
+ output_attentions=output_attentions,
1605
+ output_hidden_states=output_hidden_states,
1606
+ return_dict=return_dict,
1607
+ )
1608
+ hidden_states = outputs[0] # last hidden state
1609
+
1610
+ eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device)
1611
+
1612
+ if len(torch.unique_consecutive(eos_mask.sum(1))) > 1:
1613
+ raise ValueError("All examples must have the same number of <eos> tokens.")
1614
+ sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[
1615
+ :, -1, :
1616
+ ]
1617
+ logits = self.classification_head(sentence_representation)
1618
+
1619
+ loss = None
1620
+ if labels is not None:
1621
+ if self.config.problem_type is None:
1622
+ if self.config.num_labels == 1:
1623
+ self.config.problem_type = "regression"
1624
+ elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1625
+ self.config.problem_type = "single_label_classification"
1626
+ else:
1627
+ self.config.problem_type = "multi_label_classification"
1628
+
1629
+ if self.config.problem_type == "regression":
1630
+ loss_fct = MSELoss()
1631
+ if self.config.num_labels == 1:
1632
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1633
+ else:
1634
+ loss = loss_fct(logits, labels)
1635
+ elif self.config.problem_type == "single_label_classification":
1636
+ loss_fct = CrossEntropyLoss()
1637
+ loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
1638
+ elif self.config.problem_type == "multi_label_classification":
1639
+ loss_fct = BCEWithLogitsLoss()
1640
+ loss = loss_fct(logits, labels)
1641
+ if not return_dict:
1642
+ output = (logits,) + outputs[1:]
1643
+ return ((loss,) + output) if loss is not None else output
1644
+
1645
+ return Seq2SeqSequenceClassifierOutput(
1646
+ loss=loss,
1647
+ logits=logits,
1648
+ past_key_values=outputs.past_key_values,
1649
+ decoder_hidden_states=outputs.decoder_hidden_states,
1650
+ decoder_attentions=outputs.decoder_attentions,
1651
+ cross_attentions=outputs.cross_attentions,
1652
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1653
+ encoder_hidden_states=outputs.encoder_hidden_states,
1654
+ encoder_attentions=outputs.encoder_attentions,
1655
+ )
1656
+
1657
+
1658
+ @add_start_docstrings(
1659
+ """
1660
+ MVP Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer
1661
+ on top of the hidden-states output to compute `span start logits` and `span end logits`).
1662
+ """,
1663
+ MVP_START_DOCSTRING,
1664
+ )
1665
+ class MvpForQuestionAnswering(MvpPreTrainedModel):
1666
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
1667
+
1668
+ def __init__(self, config):
1669
+ super().__init__(config)
1670
+
1671
+ config.num_labels = 2
1672
+ self.num_labels = config.num_labels
1673
+
1674
+ self.model = MvpModel(config)
1675
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1676
+
1677
+ # Initialize weights and apply final processing
1678
+ self.post_init()
1679
+
1680
+ def set_lightweight_tuning(self):
1681
+ self.model.set_lightweight_tuning()
1682
+ self.qa_outputs.requires_grad_(False)
1683
+
1684
+ @add_start_docstrings_to_model_forward(MVP_INPUTS_DOCSTRING)
1685
+ @add_end_docstrings(MVP_QUESTION_ANSWERING_SAMPLE)
1686
+ def forward(
1687
+ self,
1688
+ input_ids: torch.Tensor = None,
1689
+ attention_mask: Optional[torch.Tensor] = None,
1690
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1691
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1692
+ head_mask: Optional[torch.Tensor] = None,
1693
+ decoder_head_mask: Optional[torch.Tensor] = None,
1694
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1695
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
1696
+ start_positions: Optional[torch.LongTensor] = None,
1697
+ end_positions: Optional[torch.LongTensor] = None,
1698
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1699
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1700
+ use_cache: Optional[bool] = None,
1701
+ output_attentions: Optional[bool] = None,
1702
+ output_hidden_states: Optional[bool] = None,
1703
+ return_dict: Optional[bool] = None,
1704
+ ) -> Union[Tuple, Seq2SeqQuestionAnsweringModelOutput]:
1705
+ r"""
1706
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1707
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1708
+ Positions are clamped to the length of the sequence (*sequence_length*). Positions outside of the sequence
1709
+ are not taken into account for computing the loss.
1710
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1711
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1712
+ Positions are clamped to the length of the sequence (*sequence_length*). Positions outside of the sequence
1713
+ are not taken into account for computing the loss.
1714
+ """
1715
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1716
+ if start_positions is not None and end_positions is not None:
1717
+ use_cache = False
1718
+
1719
+ outputs = self.model(
1720
+ input_ids,
1721
+ attention_mask=attention_mask,
1722
+ decoder_input_ids=decoder_input_ids,
1723
+ decoder_attention_mask=decoder_attention_mask,
1724
+ head_mask=head_mask,
1725
+ decoder_head_mask=decoder_head_mask,
1726
+ cross_attn_head_mask=cross_attn_head_mask,
1727
+ encoder_outputs=encoder_outputs,
1728
+ inputs_embeds=inputs_embeds,
1729
+ decoder_inputs_embeds=decoder_inputs_embeds,
1730
+ use_cache=use_cache,
1731
+ output_attentions=output_attentions,
1732
+ output_hidden_states=output_hidden_states,
1733
+ return_dict=return_dict,
1734
+ )
1735
+
1736
+ sequence_output = outputs[0]
1737
+
1738
+ logits = self.qa_outputs(sequence_output)
1739
+ start_logits, end_logits = logits.split(1, dim=-1)
1740
+ start_logits = start_logits.squeeze(-1).contiguous()
1741
+ end_logits = end_logits.squeeze(-1).contiguous()
1742
+
1743
+ total_loss = None
1744
+ if start_positions is not None and end_positions is not None:
1745
+ # If we are on multi-GPU, the position tensors may carry an extra dimension; squeeze it
1746
+ if len(start_positions.size()) > 1:
1747
+ start_positions = start_positions.squeeze(-1)
1748
+ if len(end_positions.size()) > 1:
1749
+ end_positions = end_positions.squeeze(-1)
1750
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1751
+ ignored_index = start_logits.size(1)
1752
+ start_positions = start_positions.clamp(0, ignored_index)
1753
+ end_positions = end_positions.clamp(0, ignored_index)
1754
+
1755
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1756
+ start_loss = loss_fct(start_logits, start_positions)
1757
+ end_loss = loss_fct(end_logits, end_positions)
1758
+ total_loss = (start_loss + end_loss) / 2
1759
+
1760
+ if not return_dict:
1761
+ output = (
1762
+ start_logits,
1763
+ end_logits,
1764
+ ) + outputs[1:]
1765
+ return ((total_loss,) + output) if total_loss is not None else output
1766
+
1767
+ return Seq2SeqQuestionAnsweringModelOutput(
1768
+ loss=total_loss,
1769
+ start_logits=start_logits,
1770
+ end_logits=end_logits,
1771
+ past_key_values=outputs.past_key_values,
1772
+ decoder_hidden_states=outputs.decoder_hidden_states,
1773
+ decoder_attentions=outputs.decoder_attentions,
1774
+ cross_attentions=outputs.cross_attentions,
1775
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1776
+ encoder_hidden_states=outputs.encoder_hidden_states,
1777
+ encoder_attentions=outputs.encoder_attentions,
1778
+ )
1779
+
1780
+
1781
+ # Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->Mvp
1782
+ class MvpDecoderWrapper(MvpPreTrainedModel):
1783
+ """
1784
+ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
1785
+ used in combination with the [`EncoderDecoderModel`] framework.
1786
+ """
1787
+
1788
+ def __init__(self, config):
1789
+ super().__init__(config)
1790
+ self.decoder = MvpDecoder(config)
1791
+
1792
+ def forward(self, *args, **kwargs):
1793
+ return self.decoder(*args, **kwargs)
1794
+
1795
+
1796
+ class MvpForCausalLM(MvpPreTrainedModel):
1797
+ _tied_weights_keys = ["lm_head.weight"]
1798
+
1799
+ def __init__(self, config):
1800
+ config = copy.deepcopy(config)
1801
+ config.is_decoder = True
1802
+ config.is_encoder_decoder = False
1803
+ super().__init__(config)
1804
+ self.model = MvpDecoderWrapper(config)
1805
+
1806
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1807
+
1808
+ # Initialize weights and apply final processing
1809
+ self.post_init()
1810
+
1811
+ def get_input_embeddings(self):
1812
+ return self.model.decoder.embed_tokens
1813
+
1814
+ def set_input_embeddings(self, value):
1815
+ self.model.decoder.embed_tokens = value
1816
+
1817
+ def get_output_embeddings(self):
1818
+ return self.lm_head
1819
+
1820
+ def set_output_embeddings(self, new_embeddings):
1821
+ self.lm_head = new_embeddings
1822
+
1823
+ def set_decoder(self, decoder):
1824
+ self.model.decoder = decoder
1825
+
1826
+ def get_decoder(self):
1827
+ return self.model.decoder
1828
+
1829
+ def set_lightweight_tuning(self):
1830
+ self.model.set_lightweight_tuning()
1831
+ self.lm_head.requires_grad_(False)
1832
+
1833
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
1834
+ def forward(
1835
+ self,
1836
+ input_ids: torch.LongTensor = None,
1837
+ attention_mask: Optional[torch.Tensor] = None,
1838
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1839
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1840
+ head_mask: Optional[torch.Tensor] = None,
1841
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1842
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1843
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1844
+ labels: Optional[torch.LongTensor] = None,
1845
+ use_cache: Optional[bool] = None,
1846
+ output_attentions: Optional[bool] = None,
1847
+ output_hidden_states: Optional[bool] = None,
1848
+ return_dict: Optional[bool] = None,
1849
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
1850
+ r"""
1851
+ Args:
1852
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1853
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
1854
+ provide it.
1855
+
1856
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1857
+ [`PreTrainedTokenizer.__call__`] for details.
1858
+
1859
+ [What are input IDs?](../glossary#input-ids)
1860
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1861
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1862
+
1863
+ - 1 for tokens that are **not masked**,
1864
+ - 0 for tokens that are **masked**.
1865
+
1866
+ [What are attention masks?](../glossary#attention-mask)
1867
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1868
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
1869
+ if the model is configured as a decoder.
1870
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1871
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
1872
+ in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
1873
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
1874
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
1875
+
1876
+ - 1 indicates the head is **not masked**,
1877
+ - 0 indicates the head is **masked**.
1878
+
1879
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
1880
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
1881
+
1882
+ - 1 indicates the head is **not masked**,
1883
+ - 0 indicates the head is **masked**.
1884
+
1885
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
1886
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1887
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
1888
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
1889
+ tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
1890
+
1891
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
1892
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
1893
+
1894
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
1895
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
1896
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1897
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1898
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1899
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1900
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1901
+ use_cache (`bool`, *optional*):
1902
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
1903
+ (see `past_key_values`).
1904
1907
+ output_attentions (`bool`, *optional*):
1908
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1909
+ returned tensors for more detail.
1910
+ output_hidden_states (`bool`, *optional*):
1911
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
1912
+ for more detail.
1913
+ return_dict (`bool`, *optional*):
1914
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1915
+
1916
+ Returns:
1917
+
1918
+ Example:
1919
+
1920
+ ```python
1921
+ >>> from transformers import AutoTokenizer, MvpForCausalLM
1922
+
1923
+ >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp")
1924
+ >>> model = MvpForCausalLM.from_pretrained("RUCAIBox/mvp", add_cross_attention=False)
1925
+
1926
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1927
+ >>> outputs = model(**inputs)
1928
+
1929
+ >>> logits = outputs.logits
1930
+ >>> list(logits.shape)
1931
+ [1, 8, 50267]
1932
+ ```"""
1933
+
1934
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1935
+ output_hidden_states = (
1936
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1937
+ )
1938
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1939
+
1940
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1941
+ outputs = self.model.decoder(
1942
+ input_ids=input_ids,
1943
+ attention_mask=attention_mask,
1944
+ encoder_hidden_states=encoder_hidden_states,
1945
+ encoder_attention_mask=encoder_attention_mask,
1946
+ head_mask=head_mask,
1947
+ cross_attn_head_mask=cross_attn_head_mask,
1948
+ past_key_values=past_key_values,
1949
+ inputs_embeds=inputs_embeds,
1950
+ use_cache=use_cache,
1951
+ output_attentions=output_attentions,
1952
+ output_hidden_states=output_hidden_states,
1953
+ return_dict=return_dict,
1954
+ )
1955
+
1956
+ logits = self.lm_head(outputs[0])
1957
+
1958
+ loss = None
1959
+ if labels is not None:
1960
+ loss_fct = CrossEntropyLoss()
1961
+ loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
1962
+
1963
+ if not return_dict:
1964
+ output = (logits,) + outputs[1:]
1965
+ return (loss,) + output if loss is not None else output
1966
+
1967
+ return CausalLMOutputWithCrossAttentions(
1968
+ loss=loss,
1969
+ logits=logits,
1970
+ past_key_values=outputs.past_key_values,
1971
+ hidden_states=outputs.hidden_states,
1972
+ attentions=outputs.attentions,
1973
+ cross_attentions=outputs.cross_attentions,
1974
+ )
1975
+
1976
+ def prepare_inputs_for_generation(
1977
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
1978
+ ):
1979
+ # if the model is used as a decoder in an encoder-decoder model, the decoder attention mask is created on the fly
1980
+ if attention_mask is None:
1981
+ attention_mask = input_ids.new_ones(input_ids.shape)
1982
+
1983
+ if past_key_values:
1984
+ past_length = past_key_values[0][0].shape[2]
1985
+
1986
+ # Some generation methods already pass only the last input ID
1987
+ if input_ids.shape[1] > past_length:
1988
+ remove_prefix_length = past_length
1989
+ else:
1990
+ # Default to old behavior: keep only final ID
1991
+ remove_prefix_length = input_ids.shape[1] - 1
1992
+
1993
+ input_ids = input_ids[:, remove_prefix_length:]
1994
+ return {
+ "input_ids": input_ids,
1997
+ "attention_mask": attention_mask,
1998
+ "past_key_values": past_key_values,
1999
+ "use_cache": use_cache,
2000
+ }
2001
+
2002
+ @staticmethod
2003
+ def _reorder_cache(past_key_values, beam_idx):
2004
+ reordered_past = ()
2005
+ for layer_past in past_key_values:
2006
+ reordered_past += (
2007
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
2008
+ )
2009
+ return reordered_past
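+
+ # A small sketch of what `_reorder_cache` does during beam search: every cached tensor is
+ # re-indexed along the batch dimension so that each beam keeps the states of the hypothesis
+ # it was forked from (toy single-layer cache, shapes chosen only for illustration):
+ #
+ #     import torch
+ #
+ #     beam_idx = torch.tensor([1, 1, 0])  # beams 0 and 1 continue hypothesis 1, beam 2 continues 0
+ #     layer_past = (torch.randn(3, 16, 5, 64), torch.randn(3, 16, 5, 64))
+ #     reordered = MvpForCausalLM._reorder_cache((layer_past,), beam_idx)
+ #     # reordered[0][0][0] now equals layer_past[0][1]
+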
venv/lib/python3.10/site-packages/transformers/models/mvp/tokenization_mvp.py ADDED
@@ -0,0 +1,391 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import json
17
+ import os
18
+ from functools import lru_cache
19
+ from typing import List, Optional, Tuple
20
+
21
+ import regex as re
22
+
23
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
31
+
32
+ # See all MVP models at https://huggingface.co/models?filter=mvp
33
+
34
+
35
+ @lru_cache()
36
+ def bytes_to_unicode():
37
+ """
38
+ Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
+ characters that the bpe code barfs on.
40
+
41
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
42
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
43
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
44
+ tables between utf-8 bytes and unicode strings.
45
+ """
46
+ bs = (
47
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
48
+ )
49
+ cs = bs[:]
50
+ n = 0
51
+ for b in range(2**8):
52
+ if b not in bs:
53
+ bs.append(b)
54
+ cs.append(2**8 + n)
55
+ n += 1
56
+ cs = [chr(n) for n in cs]
57
+ return dict(zip(bs, cs))
58
+
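+ # For illustration, the resulting table maps printable bytes to themselves and shifts the
+ # remaining bytes into otherwise unused unicode code points, e.g.:
+ #
+ #     byte_encoder = bytes_to_unicode()
+ #     byte_encoder[ord("A")]   # "A"
+ #     byte_encoder[ord(" ")]   # "Ġ"  (space is remapped)
+ #     byte_encoder[ord("\n")]  # "Ċ"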
59
+
60
+ def get_pairs(word):
61
+ """
62
+ Return set of symbol pairs in a word.
63
+
64
+ Word is represented as tuple of symbols (symbols being variable-length strings).
65
+ """
66
+ pairs = set()
67
+ prev_char = word[0]
68
+ for char in word[1:]:
69
+ pairs.add((prev_char, char))
70
+ prev_char = char
71
+ return pairs
72
+
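+ # For example, a word already split into symbols yields its adjacent pairs:
+ #
+ #     get_pairs(("h", "e", "l", "l", "o"))
+ #     # {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}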
73
+
74
+ class MvpTokenizer(PreTrainedTokenizer):
75
+ """
76
+ Constructs an MVP tokenizer, which is similar to the RoBERTa tokenizer, using byte-level Byte-Pair-Encoding.
77
+
78
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
79
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
80
+
81
+ ```python
82
+ >>> from transformers import MvpTokenizer
83
+
84
+ >>> tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp")
85
+ >>> tokenizer("Hello world")["input_ids"]
86
+ [0, 31414, 232, 2]
87
+
88
+ >>> tokenizer(" Hello world")["input_ids"]
89
+ [0, 20920, 232, 2]
90
+ ```
91
+
92
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
93
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
94
+
95
+ <Tip>
96
+
97
+ When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
98
+
99
+ </Tip>
100
+
101
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
102
+ this superclass for more information regarding those methods.
103
+
104
+ Args:
105
+ vocab_file (`str`):
106
+ Path to the vocabulary file.
107
+ merges_file (`str`):
108
+ Path to the merges file.
109
+ errors (`str`, *optional*, defaults to `"replace"`):
110
+ Paradigm to follow when decoding bytes to UTF-8. See
111
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
112
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
113
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
114
+
115
+ <Tip>
116
+
117
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
118
+ sequence. The token used is the `cls_token`.
119
+
120
+ </Tip>
121
+
122
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
123
+ The end of sequence token.
124
+
125
+ <Tip>
126
+
127
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
128
+ The token used is the `sep_token`.
129
+
130
+ </Tip>
131
+
132
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
133
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
134
+ sequence classification or for a text and a question for question answering. It is also used as the last
135
+ token of a sequence built with special tokens.
136
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
137
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
138
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
139
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
140
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
141
+ token instead.
142
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
143
+ The token used for padding, for example when batching sequences of different lengths.
144
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
145
+ The token used for masking values. This is the token used when training this model with masked language
146
+ modeling. This is the token which the model will try to predict.
147
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
148
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
149
+ other word. (The MVP tokenizer detects the beginning of words by the preceding space.)
150
+ """
151
+
152
+ vocab_files_names = VOCAB_FILES_NAMES
153
+ model_input_names = ["input_ids", "attention_mask"]
154
+
155
+ def __init__(
156
+ self,
157
+ vocab_file,
158
+ merges_file,
159
+ errors="replace",
160
+ bos_token="<s>",
161
+ eos_token="</s>",
162
+ sep_token="</s>",
163
+ cls_token="<s>",
164
+ unk_token="<unk>",
165
+ pad_token="<pad>",
166
+ mask_token="<mask>",
167
+ add_prefix_space=False,
168
+ **kwargs,
169
+ ):
170
+ bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
171
+ eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
172
+ sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token
173
+ cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token
174
+ unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
175
+ pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
176
+
177
+ # Mask token behaves like a normal word, i.e. includes the space before it
178
+ mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
179
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
180
+ self.encoder = json.load(vocab_handle)
181
+ self.decoder = {v: k for k, v in self.encoder.items()}
182
+ self.errors = errors # how to handle errors in decoding
183
+ self.byte_encoder = bytes_to_unicode()
184
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
185
+ with open(merges_file, encoding="utf-8") as merges_handle:
186
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
187
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
188
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
189
+ self.cache = {}
190
+ self.add_prefix_space = add_prefix_space
191
+
192
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
193
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
194
+
195
+ super().__init__(
196
+ errors=errors,
197
+ bos_token=bos_token,
198
+ eos_token=eos_token,
199
+ unk_token=unk_token,
200
+ sep_token=sep_token,
201
+ cls_token=cls_token,
202
+ pad_token=pad_token,
203
+ mask_token=mask_token,
204
+ add_prefix_space=add_prefix_space,
205
+ **kwargs,
206
+ )
207
+
208
+ @property
209
+ def vocab_size(self):
210
+ return len(self.encoder)
211
+
212
+ def get_vocab(self):
213
+ vocab = self.encoder.copy()
214
+ vocab.update(self.added_tokens_encoder)
215
+ return vocab
216
+
217
+ def bpe(self, token):
218
+ if token in self.cache:
219
+ return self.cache[token]
220
+ word = tuple(token)
221
+ pairs = get_pairs(word)
222
+
223
+ if not pairs:
224
+ return token
225
+
226
+ while True:
227
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
228
+ if bigram not in self.bpe_ranks:
229
+ break
230
+ first, second = bigram
231
+ new_word = []
232
+ i = 0
233
+ while i < len(word):
234
+ try:
235
+ j = word.index(first, i)
236
+ except ValueError:
237
+ new_word.extend(word[i:])
238
+ break
239
+ else:
240
+ new_word.extend(word[i:j])
241
+ i = j
242
+
243
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
244
+ new_word.append(first + second)
245
+ i += 2
246
+ else:
247
+ new_word.append(word[i])
248
+ i += 1
249
+ new_word = tuple(new_word)
250
+ word = new_word
251
+ if len(word) == 1:
252
+ break
253
+ else:
254
+ pairs = get_pairs(word)
255
+ word = " ".join(word)
256
+ self.cache[token] = word
257
+ return word
258
+
259
+ def _tokenize(self, text):
260
+ """Tokenize a string."""
261
+ bpe_tokens = []
262
+ for token in re.findall(self.pat, text):
263
+ token = "".join(
264
+ self.byte_encoder[b] for b in token.encode("utf-8")
265
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
266
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
267
+ return bpe_tokens
268
+
269
+ def _convert_token_to_id(self, token):
270
+ """Converts a token (str) in an id using the vocab."""
271
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
272
+
273
+ def _convert_id_to_token(self, index):
274
+ """Converts an index (integer) in a token (str) using the vocab."""
275
+ return self.decoder.get(index)
276
+
277
+ def convert_tokens_to_string(self, tokens):
278
+ """Converts a sequence of tokens (string) in a single string."""
279
+ text = "".join(tokens)
280
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
281
+ return text
282
+
283
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
284
+ if not os.path.isdir(save_directory):
285
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
286
+ return
287
+ vocab_file = os.path.join(
288
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
289
+ )
290
+ merge_file = os.path.join(
291
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
292
+ )
293
+
294
+ with open(vocab_file, "w", encoding="utf-8") as f:
295
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
296
+
297
+ index = 0
298
+ with open(merge_file, "w", encoding="utf-8") as writer:
299
+ writer.write("#version: 0.2\n")
300
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
301
+ if index != token_index:
302
+ logger.warning(
303
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
304
+ " Please check that the tokenizer is not corrupted!"
305
+ )
306
+ index = token_index
307
+ writer.write(" ".join(bpe_tokens) + "\n")
308
+ index += 1
309
+
310
+ return vocab_file, merge_file
311
+
312
+ def build_inputs_with_special_tokens(
313
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
314
+ ) -> List[int]:
315
+ """
316
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
317
+ adding special tokens. An MVP sequence has the following format:
318
+
319
+ - single sequence: `<s> X </s>`
320
+ - pair of sequences: `<s> A </s></s> B </s>`
321
+
322
+ Args:
323
+ token_ids_0 (`List[int]`):
324
+ List of IDs to which the special tokens will be added.
325
+ token_ids_1 (`List[int]`, *optional*):
326
+ Optional second list of IDs for sequence pairs.
327
+
328
+ Returns:
329
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
330
+ """
331
+ if token_ids_1 is None:
332
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
333
+ cls = [self.cls_token_id]
334
+ sep = [self.sep_token_id]
335
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
336
+
337
+ def get_special_tokens_mask(
338
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
339
+ ) -> List[int]:
340
+ """
341
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
342
+ special tokens using the tokenizer `prepare_for_model` method.
343
+
344
+ Args:
345
+ token_ids_0 (`List[int]`):
346
+ List of IDs.
347
+ token_ids_1 (`List[int]`, *optional*):
348
+ Optional second list of IDs for sequence pairs.
349
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
350
+ Whether or not the token list is already formatted with special tokens for the model.
351
+
352
+ Returns:
353
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
354
+ """
355
+ if already_has_special_tokens:
356
+ return super().get_special_tokens_mask(
357
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
358
+ )
359
+
360
+ if token_ids_1 is None:
361
+ return [1] + ([0] * len(token_ids_0)) + [1]
362
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
363
+
364
+ def create_token_type_ids_from_sequences(
365
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
366
+ ) -> List[int]:
367
+ """
368
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. MVP does not
369
+ make use of token type ids, therefore a list of zeros is returned.
370
+
371
+ Args:
372
+ token_ids_0 (`List[int]`):
373
+ List of IDs.
374
+ token_ids_1 (`List[int]`, *optional*):
375
+ Optional second list of IDs for sequence pairs.
376
+
377
+ Returns:
378
+ `List[int]`: List of zeros.
379
+ """
380
+ sep = [self.sep_token_id]
381
+ cls = [self.cls_token_id]
382
+
383
+ if token_ids_1 is None:
384
+ return len(cls + token_ids_0 + sep) * [0]
385
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
386
+
387
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
388
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
389
+ if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
390
+ text = " " + text
391
+ return (text, kwargs)
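
As a quick sanity check of the prefix-space behaviour and the `<s> X </s>` special-token layout documented in `MvpTokenizer` above, a minimal sketch might look like the following (it assumes the `RUCAIBox/mvp` checkpoint referenced in the docstrings can be downloaded; the printed tokens are indicative):

```python
from transformers import MvpTokenizer

tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp")

# A word is encoded differently depending on whether it follows a space.
print(tokenizer.tokenize("Hello world"))                         # e.g. ['Hello', 'Ġworld']
print(tokenizer.tokenize("Hello world", add_prefix_space=True))  # e.g. ['ĠHello', 'Ġworld']

# Single sequences are wrapped as `<s> X </s>` by build_inputs_with_special_tokens.
ids = tokenizer("Hello world")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))                      # e.g. ['<s>', 'Hello', 'Ġworld', '</s>']
```

The resulting ids (`[0, 31414, 232, 2]` without a leading space, `[0, 20920, 232, 2]` with one) should match the examples given in the class docstring.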
venv/lib/python3.10/site-packages/transformers/models/mvp/tokenization_mvp_fast.py ADDED
@@ -0,0 +1,279 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import json
17
+ from typing import List, Optional, Tuple
18
+
19
+ from tokenizers import pre_tokenizers, processors
20
+
21
+ from ...tokenization_utils_base import AddedToken, BatchEncoding
22
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
23
+ from ...utils import logging
24
+ from .tokenization_mvp import MvpTokenizer
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
31
+
32
+ # See all MVP models at https://huggingface.co/models?filter=mvp
33
+
34
+
35
+ class MvpTokenizerFast(PreTrainedTokenizerFast):
36
+ r"""
37
+ Construct a "fast" MVP tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2 tokenizer,
38
+ using byte-level Byte-Pair-Encoding.
39
+
40
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
41
+ be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:
42
+
43
+ ```python
44
+ >>> from transformers import MvpTokenizerFast
45
+
46
+ >>> tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
47
+ >>> tokenizer("Hello world")["input_ids"]
48
+ [0, 31414, 232, 2]
49
+
50
+ >>> tokenizer(" Hello world")["input_ids"]
51
+ [0, 20920, 232, 2]
52
+ ```
53
+
54
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
55
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
56
+
57
+ <Tip>
58
+
59
+ When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
60
+
61
+ </Tip>
62
+
63
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
64
+ refer to this superclass for more information regarding those methods.
65
+
66
+ Args:
67
+ vocab_file (`str`):
68
+ Path to the vocabulary file.
69
+ merges_file (`str`):
70
+ Path to the merges file.
71
+ errors (`str`, *optional*, defaults to `"replace"`):
72
+ Paradigm to follow when decoding bytes to UTF-8. See
73
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
74
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
75
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
76
+
77
+ <Tip>
78
+
79
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
80
+ sequence. The token used is the `cls_token`.
81
+
82
+ </Tip>
83
+
84
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
85
+ The end of sequence token.
86
+
87
+ <Tip>
88
+
89
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
90
+ The token used is the `sep_token`.
91
+
92
+ </Tip>
93
+
94
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
95
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
96
+ sequence classification or for a text and a question for question answering. It is also used as the last
97
+ token of a sequence built with special tokens.
98
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
99
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
100
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
101
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
102
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
103
+ token instead.
104
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
105
+ The token used for padding, for example when batching sequences of different lengths.
106
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
107
+ The token used for masking values. This is the token used when training this model with masked language
108
+ modeling. This is the token which the model will try to predict.
109
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
110
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
111
+ other word. (The MVP tokenizer detects the beginning of words by the preceding space.)
112
+ trim_offsets (`bool`, *optional*, defaults to `True`):
113
+ Whether the post processing step should trim offsets to avoid including whitespaces.
114
+ """
115
+
116
+ vocab_files_names = VOCAB_FILES_NAMES
117
+ model_input_names = ["input_ids", "attention_mask"]
118
+ slow_tokenizer_class = MvpTokenizer
119
+
120
+ def __init__(
121
+ self,
122
+ vocab_file=None,
123
+ merges_file=None,
124
+ tokenizer_file=None,
125
+ errors="replace",
126
+ bos_token="<s>",
127
+ eos_token="</s>",
128
+ sep_token="</s>",
129
+ cls_token="<s>",
130
+ unk_token="<unk>",
131
+ pad_token="<pad>",
132
+ mask_token="<mask>",
133
+ add_prefix_space=False,
134
+ trim_offsets=True,
135
+ **kwargs,
136
+ ):
137
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
138
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
139
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
140
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
141
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
142
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
143
+
144
+ # Mask token behaves like a normal word, i.e. includes the space before it
145
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
146
+ super().__init__(
147
+ vocab_file,
148
+ merges_file,
149
+ tokenizer_file=tokenizer_file,
150
+ errors=errors,
151
+ bos_token=bos_token,
152
+ eos_token=eos_token,
153
+ sep_token=sep_token,
154
+ cls_token=cls_token,
155
+ unk_token=unk_token,
156
+ pad_token=pad_token,
157
+ mask_token=mask_token,
158
+ add_prefix_space=add_prefix_space,
159
+ trim_offsets=trim_offsets,
160
+ **kwargs,
161
+ )
162
+
163
+ pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
164
+ if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
165
+ pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
166
+ pre_tok_state["add_prefix_space"] = add_prefix_space
167
+ self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
168
+
169
+ self.add_prefix_space = add_prefix_space
170
+
171
+ # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
172
+ tokenizer_component = "post_processor"
173
+ tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
174
+ if tokenizer_component_instance:
175
+ state = json.loads(tokenizer_component_instance.__getstate__())
176
+
177
+ # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
178
+ if "sep" in state:
179
+ state["sep"] = tuple(state["sep"])
180
+ if "cls" in state:
181
+ state["cls"] = tuple(state["cls"])
182
+
183
+ changes_to_apply = False
184
+
185
+ if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
186
+ state["add_prefix_space"] = add_prefix_space
187
+ changes_to_apply = True
188
+
189
+ if state.get("trim_offsets", trim_offsets) != trim_offsets:
190
+ state["trim_offsets"] = trim_offsets
191
+ changes_to_apply = True
192
+
193
+ if changes_to_apply:
194
+ component_class = getattr(processors, state.pop("type"))
195
+ new_value = component_class(**state)
196
+ setattr(self.backend_tokenizer, tokenizer_component, new_value)
197
+
198
+ @property
199
+ def mask_token(self) -> str:
200
+ """
201
+ `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
202
+ having been set.
203
+
204
+ MVP tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily
205
+ comprise the space before the *<mask>*.
206
+ """
207
+ if self._mask_token is None:
208
+ if self.verbose:
209
+ logger.error("Using mask_token, but it is not set yet.")
210
+ return None
211
+ return str(self._mask_token)
212
+
213
+ @mask_token.setter
214
+ def mask_token(self, value):
215
+ """
216
+ Overriding the default behavior of the mask token to have it eat the space before it.
217
+
218
+ This is needed to preserve backward compatibility with all the previously used models based on Mvp.
219
+ """
220
+ # Mask token behaves like a normal word, i.e. includes the space before it
221
+ # So we set lstrip to True
222
+ value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
223
+ self._mask_token = value
224
+
225
+ def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
226
+ is_split_into_words = kwargs.get("is_split_into_words", False)
227
+
228
+ if is_split_into_words and not self.add_prefix_space:
229
+ raise ValueError(
230
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
231
+ "to use it with pretokenized inputs."
232
+ )
233
+
234
+ return super()._batch_encode_plus(*args, **kwargs)
235
+
236
+ def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
237
+ is_split_into_words = kwargs.get("is_split_into_words", False)
238
+
239
+ if is_split_into_words and not self.add_prefix_space:
240
+ raise ValueError(
241
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
242
+ "to use it with pretokenized inputs."
243
+ )
244
+
245
+ return super()._encode_plus(*args, **kwargs)
246
+
247
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
248
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
249
+ return tuple(files)
250
+
251
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
252
+ output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
253
+ if token_ids_1 is None:
254
+ return output
255
+
256
+ return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
257
+
258
+ def create_token_type_ids_from_sequences(
259
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
260
+ ) -> List[int]:
261
+ """
262
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. MVP does not
263
+ make use of token type ids, therefore a list of zeros is returned.
264
+
265
+ Args:
266
+ token_ids_0 (`List[int]`):
267
+ List of IDs.
268
+ token_ids_1 (`List[int]`, *optional*):
269
+ Optional second list of IDs for sequence pairs.
270
+
271
+ Returns:
272
+ `List[int]`: List of zeros.
273
+ """
274
+ sep = [self.sep_token_id]
275
+ cls = [self.cls_token_id]
276
+
277
+ if token_ids_1 is None:
278
+ return len(cls + token_ids_0 + sep) * [0]
279
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
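
As the `_encode_plus`/`_batch_encode_plus` overrides above show, the fast tokenizer refuses pretokenized input unless it was instantiated with `add_prefix_space=True`. A minimal sketch of that behaviour (again assuming the `RUCAIBox/mvp` checkpoint can be loaded) might be:

```python
from transformers import MvpTokenizerFast

default_tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
try:
    # A flat list of words with is_split_into_words=True hits the guard above.
    default_tok(["Hello", "world"], is_split_into_words=True)
except ValueError as err:
    print(err)  # instructs you to pass add_prefix_space=True

# Re-instantiating with add_prefix_space=True accepts pretokenized words,
# each word then being treated as if it were preceded by a space.
prefixed_tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp", add_prefix_space=True)
print(prefixed_tok(["Hello", "world"], is_split_into_words=True)["input_ids"])
```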