Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/_archive_maps.py +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__init__.py +56 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/configuration_mctct.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/feature_extraction_mctct.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/modeling_mctct.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/processing_mctct.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/configuration_mctct.py +184 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/feature_extraction_mctct.py +288 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/modeling_mctct.py +792 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/processing_mctct.py +142 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__init__.py +45 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/configuration_mmbt.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/modeling_mmbt.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/configuration_mmbt.py +42 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/modeling_mmbt.py +408 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__init__.py +29 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/tokenization_tapex.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/tokenization_tapex.py +1467 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__init__.py +63 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/configuration_trajectory_transformer.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/modeling_trajectory_transformer.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py +155 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py +70 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py +606 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py +189 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py +121 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py +1122 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py +179 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py +1295 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__init__.py +82 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/configuration_ernie_m.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/modeling_ernie_m.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/tokenization_ernie_m.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/configuration_ernie_m.py +112 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/modeling_ernie_m.py +1058 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/tokenization_ernie_m.py +405 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__init__.py +107 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/convert_fnet_original_flax_checkpoint_to_pytorch.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/modeling_fnet.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/tokenization_fnet.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/tokenization_fnet_fast.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/configuration_fnet.py +119 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/convert_fnet_original_flax_checkpoint_to_pytorch.py +157 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/__init__.py
ADDED
File without changes

llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/_archive_maps.py
ADDED
The diff for this file is too large to render.
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__init__.py
ADDED
@@ -0,0 +1,56 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
+    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
+    "processing_mctct": ["MCTCTProcessor"],
+}
+
+
+try:
+    if not is_torch_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_mctct"] = [
+        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "MCTCTForCTC",
+        "MCTCTModel",
+        "MCTCTPreTrainedModel",
+    ]
+
+
+if TYPE_CHECKING:
+    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
+    from .feature_extraction_mctct import MCTCTFeatureExtractor
+    from .processing_mctct import MCTCTProcessor
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
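The `_LazyModule` registration at the bottom of this `__init__.py` defers the torch-dependent imports until a symbol is actually accessed. A minimal usage sketch (not part of the diff; it assumes a transformers build in which this deprecated module still ships at this import path):

# Importing the config is cheap and torch-free; the lazy module only
# resolves `modeling_mctct` when a torch-backed symbol is requested
# and `is_torch_available()` returns True.
from transformers.models.deprecated.mctct import MCTCTConfig

config = MCTCTConfig()
print(config.model_type)  # "mctct"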
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.07 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/configuration_mctct.cpython-310.pyc
ADDED
Binary file (7.88 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/feature_extraction_mctct.cpython-310.pyc
ADDED
Binary file (11.2 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/modeling_mctct.cpython-310.pyc
ADDED
Binary file (22.6 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/processing_mctct.cpython-310.pyc
ADDED
Binary file (4.82 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/configuration_mctct.py
ADDED
@@ -0,0 +1,184 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""M-CTC-T model configuration"""
+
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from .._archive_maps import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+class MCTCTConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`MCTCTModel`]. It is used to instantiate an
+    M-CTC-T model according to the specified arguments, defining the model architecture. Instantiating a configuration
+    with the defaults will yield a similar configuration to that of the M-CTC-T
+    [speechbrain/m-ctc-t-large](https://huggingface.co/speechbrain/m-ctc-t-large) architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 8065):
+            Vocabulary size of the M-CTC-T model. Defines the number of different tokens that can be represented by
+            the `inputs_ids` passed when calling [`MCTCTModel`].
+        hidden_size (`int`, *optional*, defaults to 1536):
+            Dimension of the encoder layers and the pooler layer.
+        num_hidden_layers (`int`, *optional*, defaults to 36):
+            Number of hidden layers in the Transformer encoder.
+        intermediate_size (`int`, *optional*, defaults to 6144):
+            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 4):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        attention_head_dim (`int`, *optional*, defaults to 384):
+            Dimensions of each attention head for each attention layer in the Transformer encoder.
+        max_position_embeddings (`int`, *optional*, defaults to 920):
+            The maximum sequence length that this model might ever be used with (after log-mel spectrogram extraction).
+        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+            The epsilon used by the layer normalization layers.
+        layerdrop (`float`, *optional*, defaults to 0.3):
+            The probability of dropping an encoder layer during training. The default 0.3 value is used in the
+            original implementation.
+        hidden_act (`str` or `function`, *optional*, defaults to `"relu"`):
+            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+            `"relu"`, `"selu"` and `"gelu_new"` are supported.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        hidden_dropout_prob (`float`, *optional*, defaults to 0.3):
+            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.3):
+            The dropout ratio for the attention probabilities.
+        pad_token_id (`int`, *optional*, defaults to 1):
+            The tokenizer index of the pad token.
+        bos_token_id (`int`, *optional*, defaults to 0):
+            The tokenizer index of the bos token.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            The tokenizer index of the eos token.
+        conv_glu_dim (`int`, *optional*, defaults to 1):
+            The dimension of the output of the `Conv1dSubsampler` layer in which GLU is applied on. Though the
+            original Flashlight code uses the value of 2, here it's adapted to 1 due to transposition differences.
+        conv_dropout (`float`, *optional*, defaults to 0.3):
+            The probability of randomly dropping the `Conv1dSubsampler` layer during training.
+        num_conv_layers (`int`, *optional*, defaults to 1):
+            Number of convolution layers before applying transformer encoder layers.
+        conv_kernel (`Sequence[int]`, *optional*, defaults to `(7,)`):
+            The kernel size of the 1D convolution applied before transformer layers. `len(conv_kernel)` must be equal
+            to `num_conv_layers`.
+        conv_stride (`Sequence[int]`, *optional*, defaults to `(3,)`):
+            The stride length of the 1D convolution applied before transformer layers. `len(conv_stride)` must be
+            equal to `num_conv_layers`.
+        input_feat_per_channel (`int`, *optional*, defaults to 80):
+            Feature dimensions of the channels of the input to the Conv1D layer.
+        input_channels (`int`, *optional*, defaults to 1):
+            Number of input channels of the input to the Conv1D layer.
+        conv_channels (`List[int]`, *optional*):
+            Channel sizes of intermediate Conv1D layers.
+        ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
+            Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
+            instance of [`MCTCTForCTC`].
+        ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
+            Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses
+            mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an
+            instance of [`MCTCTForCTC`].
+
+    Example:
+
+    ```python
+    >>> from transformers import MCTCTConfig, MCTCTModel
+
+    >>> # Initializing a M-CTC-T mctct-large style configuration
+    >>> configuration = MCTCTConfig()
+
+    >>> # Initializing a model (with random weights) from the mctct-large style configuration
+    >>> model = MCTCTModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "mctct"
+
+    def __init__(
+        self,
+        vocab_size=8065,
+        hidden_size=1536,
+        num_hidden_layers=36,
+        intermediate_size=6144,
+        num_attention_heads=4,
+        attention_head_dim=384,
+        max_position_embeddings=920,
+        layer_norm_eps=1e-5,
+        layerdrop=0.3,
+        hidden_act="relu",
+        initializer_range=0.02,
+        hidden_dropout_prob=0.3,
+        attention_probs_dropout_prob=0.3,
+        pad_token_id=1,
+        bos_token_id=0,
+        eos_token_id=2,
+        conv_glu_dim=1,
+        conv_dropout=0.3,
+        num_conv_layers=1,
+        conv_kernel=(7,),
+        conv_stride=(3,),
+        input_feat_per_channel=80,
+        input_channels=1,
+        conv_channels=None,
+        ctc_loss_reduction="sum",
+        ctc_zero_infinity=False,
+        **kwargs,
+    ):
+        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
+        self.vocab_size = vocab_size
+        self.hidden_size = hidden_size
+        self.num_hidden_layers = num_hidden_layers
+        self.intermediate_size = intermediate_size
+        self.num_attention_heads = num_attention_heads
+        self.attention_head_dim = attention_head_dim
+        self.max_position_embeddings = max_position_embeddings
+        self.layer_norm_eps = layer_norm_eps
+        self.layerdrop = layerdrop
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.hidden_dropout_prob = hidden_dropout_prob
+        self.attention_probs_dropout_prob = attention_probs_dropout_prob
+        self.pad_token_id = pad_token_id
+        self.bos_token_id = bos_token_id
+        self.eos_token_id = eos_token_id
+        self.conv_glu_dim = conv_glu_dim
+        self.conv_dropout = conv_dropout
+        self.num_conv_layers = num_conv_layers
+        self.input_feat_per_channel = input_feat_per_channel
+        self.input_channels = input_channels
+        self.conv_channels = conv_channels
+        self.ctc_loss_reduction = ctc_loss_reduction
+        self.ctc_zero_infinity = ctc_zero_infinity
+
+        # prevents config testing fail with exporting to json
+        self.conv_kernel = list(conv_kernel)
+        self.conv_stride = list(conv_stride)
+
+        if len(self.conv_kernel) != self.num_conv_layers:
+            raise ValueError(
+                "Configuration for convolutional module is incorrect. "
+                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
+                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
+                f"`config.num_conv_layers = {self.num_conv_layers}`."
+            )
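The constructor ends by validating that the convolutional hyperparameters are mutually consistent. A short sketch of how that check surfaces to callers (illustrative only; it assumes the vendored transformers version above still exports `MCTCTConfig` at the top level):

from transformers import MCTCTConfig

# One conv layer with one kernel/stride entry is the default and passes.
config = MCTCTConfig(num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,))

# A length mismatch between `conv_kernel` and `num_conv_layers` raises the
# ValueError constructed at the end of `__init__` above.
try:
    MCTCTConfig(num_conv_layers=2, conv_kernel=(7,), conv_stride=(3, 3))
except ValueError as err:
    print(err)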
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/feature_extraction_mctct.py
ADDED
@@ -0,0 +1,288 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Feature extractor class for M-CTC-T
+"""
+
+from typing import List, Optional, Union
+
+import numpy as np
+
+from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
+from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
+from ....feature_extraction_utils import BatchFeature
+from ....file_utils import PaddingStrategy, TensorType
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class MCTCTFeatureExtractor(SequenceFeatureExtractor):
+    r"""
+    Constructs a M-CTC-T feature extractor.
+
+    This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which
+    contains most of the main methods. Users should refer to this superclass for more information regarding those
+    methods. This code has been adapted from Flashlight's C++ code. For more information about the implementation,
+    one can refer to this
+    [notebook](https://colab.research.google.com/drive/1GLtINkkhzms-IsdcGy_-tVCkv0qNF-Gt#scrollTo=pMCRGMmUC_an)
+    that takes the user step-by-step in the implementation.
+
+    Args:
+        feature_size (`int`, defaults to 80):
+            The feature dimension of the extracted features. This is the number of mel-frequency bins.
+        sampling_rate (`int`, defaults to 16000):
+            The sampling rate at which the audio files should be digitalized, expressed in hertz (Hz).
+        padding_value (`float`, defaults to 0.0):
+            The value that is used to fill the padding values.
+        hop_length (`int`, defaults to 10):
+            Number of audio samples between windows. Otherwise referred to as "shift" in many papers.
+        win_length (`int`, defaults to 25):
+            Number of ms per window.
+        win_function (`str`, defaults to `"hamming_window"`):
+            Name for the window function used for windowing, must be accessible via `torch.{win_function}`.
+        frame_signal_scale (`float`, defaults to 32768.0):
+            Constant multiplied in creating the frames before applying DFT.
+        preemphasis_coeff (`float`, defaults to 0.97):
+            Constant multiplied in applying Pre-emphasis before DFT.
+        mel_floor (`float`, defaults to 1.0):
+            Minimum value of mel frequency banks.
+        normalize_means (`bool`, *optional*, defaults to `True`):
+            Whether or not to zero-mean normalize the extracted features.
+        normalize_vars (`bool`, *optional*, defaults to `True`):
+            Whether or not to unit-variance normalize the extracted features.
+    """
+
+    model_input_names = ["input_features", "attention_mask"]
+
+    def __init__(
+        self,
+        feature_size=80,
+        sampling_rate=16000,
+        padding_value=0.0,
+        hop_length=10,
+        win_length=25,
+        win_function="hamming_window",
+        frame_signal_scale=32768.0,
+        preemphasis_coeff=0.97,
+        mel_floor=1.0,
+        normalize_means=True,
+        normalize_vars=True,
+        return_attention_mask=False,
+        **kwargs,
+    ):
+        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
+
+        self.feature_size = feature_size
+        self.sampling_rate = sampling_rate
+        self.padding_value = padding_value
+        self.hop_length = hop_length
+        self.win_length = win_length
+        self.frame_signal_scale = frame_signal_scale
+        self.preemphasis_coeff = preemphasis_coeff
+        self.mel_floor = mel_floor
+        self.normalize_means = normalize_means
+        self.normalize_vars = normalize_vars
+        self.win_function = win_function
+        self.return_attention_mask = return_attention_mask
+
+        self.sample_size = win_length * sampling_rate // 1000
+        self.sample_stride = hop_length * sampling_rate // 1000
+
+        self.n_fft = optimal_fft_length(self.sample_size)
+        self.n_freqs = (self.n_fft // 2) + 1
+
+    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
+        """
+        Extracts MFSC Features for one waveform vector (unbatched). Adapted from Flashlight's C++ MFSC code.
+        """
+        if self.win_function == "hamming_window":
+            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
+        else:
+            window = window_function(window_length=self.sample_size, name=self.win_function)
+
+        fbanks = mel_filter_bank(
+            num_frequency_bins=self.n_freqs,
+            num_mel_filters=self.feature_size,
+            min_frequency=0.0,
+            max_frequency=self.sampling_rate / 2.0,
+            sampling_rate=self.sampling_rate,
+        )
+
+        msfc_features = spectrogram(
+            one_waveform * self.frame_signal_scale,
+            window=window,
+            frame_length=self.sample_size,
+            hop_length=self.sample_stride,
+            fft_length=self.n_fft,
+            center=False,
+            preemphasis=self.preemphasis_coeff,
+            mel_filters=fbanks,
+            mel_floor=self.mel_floor,
+            log_mel="log",
+        )
+        return msfc_features.T
+
+    def _normalize_one(self, x, input_length, padding_value):
+        # make sure we normalize float32 arrays
+        if self.normalize_means:
+            mean = x[:input_length].mean(axis=0)
+            x = np.subtract(x, mean)
+        if self.normalize_vars:
+            std = x[:input_length].std(axis=0)
+            x = np.divide(x, std)
+
+        if input_length < x.shape[0]:
+            x[input_length:] = padding_value
+
+        # make sure array is in float32
+        x = x.astype(np.float32)
+
+        return x
+
+    def normalize(
+        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
+    ) -> List[np.ndarray]:
+        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
+        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
+
+    def __call__(
+        self,
+        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
+        padding: Union[bool, str, PaddingStrategy] = False,
+        max_length: Optional[int] = None,
+        truncation: bool = False,
+        pad_to_multiple_of: Optional[int] = None,
+        return_attention_mask: Optional[bool] = None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        sampling_rate: Optional[int] = None,
+        **kwargs,
+    ) -> BatchFeature:
+        """
+        Main method to featurize and prepare for the model one or several sequence(s). It returns the log-mel
+        spectrogram of the input audio, as implemented in the original Flashlight MFSC feature extraction code.
+
+        Args:
+            raw_speech (`torch.Tensor`, `np.ndarray`, `List[float]`, `List[torch.Tensor]`, `List[np.ndarray]`, `List[List[float]]`):
+                The sequence or batch of sequences to be padded. Each sequence can be a tensor, a numpy array, a list
+                of float values, a list of tensors, a list of numpy arrays or a list of list of float values. Must be
+                mono channel audio, not stereo, i.e. single float per timestep.
+            padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
+                Select a strategy to pad the returned sequences (according to the model's padding side and padding
+                index) among:
+
+                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+                  sequence is provided).
+                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+                  acceptable input length for the model if that argument is not provided.
+                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
+                  different lengths).
+            max_length (`int`, *optional*):
+                Maximum length of the returned list and optionally padding length (see above).
+            truncation (`bool`):
+                Activates truncation to cut input sequences longer than *max_length* to *max_length*.
+            pad_to_multiple_of (`int`, *optional*):
+                If set will pad the sequence to a multiple of the provided value.
+
+                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute
+                capability `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of
+                128.
+            return_attention_mask (`bool`, *optional*):
+                Whether to return the attention mask. If left to the default, will return the attention mask
+                according to the specific feature_extractor's default.
+
+                [What are attention masks?](../glossary#attention-mask)
+            return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
+                If set, will return tensors instead of list of python integers. Acceptable values are:
+
+                - `'tf'`: Return TensorFlow `tf.constant` objects.
+                - `'pt'`: Return PyTorch `torch.Tensor` objects.
+                - `'np'`: Return Numpy `np.ndarray` objects.
+            sampling_rate (`int`, *optional*):
+                The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
+                `sampling_rate` at the forward call to prevent silent errors.
+        """
+
+        if sampling_rate is not None:
+            if sampling_rate != self.sampling_rate:
+                raise ValueError(
+                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
+                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
+                    f" {self.sampling_rate} and not {sampling_rate}."
+                )
+        else:
+            logger.warning(
+                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
+                "Failing to do so can result in silent errors that might be hard to debug."
+            )
+
+        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
+        if is_batched_numpy and len(raw_speech.shape) > 2:
+            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
+        is_batched = is_batched_numpy or (
+            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
+        )
+
+        if is_batched:
+            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
+        elif not is_batched and not isinstance(raw_speech, np.ndarray):
+            raw_speech = np.asarray(raw_speech, dtype=np.float32)
+        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
+            raw_speech = raw_speech.astype(np.float32)
+
+        # always return batch
+        if not is_batched:
+            raw_speech = [raw_speech]
+
+        # extract fbank features
+        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
+
+        # convert into correct format for padding
+        encoded_inputs = BatchFeature({"input_features": features})
+
+        padded_inputs = self.pad(
+            encoded_inputs,
+            padding=padding,
+            max_length=max_length,
+            truncation=truncation,
+            pad_to_multiple_of=pad_to_multiple_of,
+            return_attention_mask=True,
+            **kwargs,
+        )
+        # make sure list is in array format
+        input_features = padded_inputs.get("input_features")
+        if isinstance(input_features[0], list):
+            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
+
+        attention_mask = padded_inputs.get("attention_mask")
+        if attention_mask is not None:
+            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
+
+        if self.normalize_means or self.normalize_vars:
+            attention_mask = (
+                np.array(attention_mask, dtype=np.int32)
+                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
+                and padding
+                else None
+            )
+            padded_inputs["input_features"] = self.normalize(
+                padded_inputs["input_features"], attention_mask=attention_mask
+            )
+
+        if return_tensors is not None:
+            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
+
+        return padded_inputs
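The framing constants computed in `__init__` drive everything downstream, so it helps to see them with the default values plugged in. A dependency-free sketch (numbers derived from the code above; `optimal_fft_length` in transformers' audio utils rounds the window size up to the next power of two):

win_length_ms, hop_length_ms, sampling_rate = 25, 10, 16000

sample_size = win_length_ms * sampling_rate // 1000    # 400 samples per analysis window
sample_stride = hop_length_ms * sampling_rate // 1000  # 160 samples between windows

n_fft = 1
while n_fft < sample_size:  # next power of two >= 400
    n_fft *= 2
n_freqs = n_fft // 2 + 1

print(sample_size, sample_stride, n_fft, n_freqs)  # 400 160 512 257

Each extracted frame therefore covers 25 ms, frames are spaced 10 ms apart, and the 512-point FFT yields 257 frequency bins that are pooled into the 80 mel filters set by `feature_size`.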
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/modeling_mctct.py
ADDED
@@ -0,0 +1,792 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
""" PyTorch M-CTC-T model."""
|
16 |
+
|
17 |
+
|
18 |
+
import math
|
19 |
+
from typing import Optional, Tuple, Union
|
20 |
+
|
21 |
+
import torch
|
22 |
+
import torch.utils.checkpoint
|
23 |
+
from torch import nn
|
24 |
+
|
25 |
+
from ....activations import ACT2FN
|
26 |
+
from ....file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
|
27 |
+
from ....integrations.deepspeed import is_deepspeed_zero3_enabled
|
28 |
+
from ....modeling_attn_mask_utils import _prepare_4d_attention_mask
|
29 |
+
from ....modeling_outputs import BaseModelOutput, CausalLMOutput
|
30 |
+
from ....modeling_utils import (
|
31 |
+
PreTrainedModel,
|
32 |
+
apply_chunking_to_forward,
|
33 |
+
find_pruneable_heads_and_indices,
|
34 |
+
prune_linear_layer,
|
35 |
+
)
|
36 |
+
from ....utils import logging
|
37 |
+
from .configuration_mctct import MCTCTConfig
|
38 |
+
|
39 |
+
|
40 |
+
logger = logging.get_logger(__name__)
|
41 |
+
|
42 |
+
_HIDDEN_STATES_START_POSITION = 1
|
43 |
+
|
44 |
+
_CONFIG_FOR_DOC = "MCTCTConfig"
|
45 |
+
|
46 |
+
# Base docstring
|
47 |
+
_CHECKPOINT_FOR_DOC = "speechbrain/m-ctc-t-large"
|
48 |
+
_EXPECTED_OUTPUT_SHAPE = [1, 195, 1536]
|
49 |
+
|
50 |
+
# CTC docstring
|
51 |
+
_CTC_EXPECTED_OUTPUT = '"Mr. Quilter is the apostle of the middle classes, and we\'re glad to welcome his gospel."'
|
52 |
+
_CTC_EXPECTED_LOSS = 1885.65
|
53 |
+
|
54 |
+
|
55 |
+
from .._archive_maps import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
|
56 |
+
|
57 |
+
|
58 |
+
class MCTCTConv1dSubsampler(nn.Module):
|
59 |
+
"""
|
60 |
+
Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation
|
61 |
+
via gated linear units (https://arxiv.org/abs/1911.08460)
|
62 |
+
"""
|
63 |
+
|
64 |
+
def __init__(self, config):
|
65 |
+
super().__init__()
|
66 |
+
self.config = config
|
67 |
+
self.glu_dim = config.conv_glu_dim
|
68 |
+
|
69 |
+
self.dropout = nn.Dropout(config.conv_dropout)
|
70 |
+
|
71 |
+
self.num_layers = config.num_conv_layers
|
72 |
+
self.in_channels = config.input_feat_per_channel * config.input_channels
|
73 |
+
|
74 |
+
if self.num_layers > 1:
|
75 |
+
if config.conv_channels is None:
|
76 |
+
raise ValueError(
|
77 |
+
"Need to specify `conv_channels` configuration in `MCTCTConfig` to use multiple convolution"
|
78 |
+
" layers."
|
79 |
+
)
|
80 |
+
|
81 |
+
self.mid_channels = config.conv_channels
|
82 |
+
else:
|
83 |
+
self.mid_channels = None
|
84 |
+
|
85 |
+
self.out_channels = config.hidden_size * 2 # considering GLU halving
|
86 |
+
self.kernel_size = config.conv_kernel
|
87 |
+
self.stride = config.conv_stride
|
88 |
+
|
89 |
+
# NOTE: MCTCT by construction only uses one convolution kernel. I've made this flexible to allow for
|
90 |
+
# multiple layers of convolutions, but not sure if this model definition should just restrict it
|
91 |
+
# to one layer. This becomes especially relevant when considering the padding like line 1 of forward().
|
92 |
+
self.conv_layers = nn.ModuleList(
|
93 |
+
nn.Conv1d(
|
94 |
+
self.in_channels if i == 0 else self.mid_channels[i],
|
95 |
+
self.mid_channels[i] if i < self.num_layers - 1 else self.out_channels,
|
96 |
+
kernel_size=k,
|
97 |
+
stride=self.stride[i],
|
98 |
+
padding="valid",
|
99 |
+
)
|
100 |
+
for i, k in enumerate(self.kernel_size)
|
101 |
+
)
|
102 |
+
|
103 |
+
def forward(self, input_features):
|
104 |
+
# NOTE: in reference to the NOTE in __init__, right now it just calculates padding as if
|
105 |
+
# there will be just one conv layer.
|
106 |
+
padding = sum([size // 2 for size in self.kernel_size]) # (7, 7) -> (3, 3)
|
107 |
+
|
108 |
+
input_features = torch.nn.functional.pad(input_features, (0, 0, padding, padding), "constant", 0)
|
109 |
+
hidden_states = input_features.transpose(1, 2).contiguous() # -> Batch x Frame x Time
|
110 |
+
for conv in self.conv_layers:
|
111 |
+
hidden_states = conv(hidden_states)
|
112 |
+
hidden_states = nn.functional.glu(hidden_states, dim=self.glu_dim)
|
113 |
+
hidden_states = self.dropout(hidden_states)
|
114 |
+
|
115 |
+
hidden_states = hidden_states.transpose(1, 2).contiguous() # -> Batch x Time x Frame
|
116 |
+
return hidden_states
|
117 |
+
|
118 |
+
|
119 |
+
class MCTCTEmbeddings(nn.Module):
|
120 |
+
"""Construct the embeddings from word, position and token_type embeddings."""
|
121 |
+
|
122 |
+
def __init__(self, config):
|
123 |
+
super().__init__()
|
124 |
+
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
|
125 |
+
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
|
126 |
+
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
|
127 |
+
|
128 |
+
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
|
129 |
+
# any TensorFlow checkpoint file
|
130 |
+
# self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
131 |
+
self.LayerNorm = MCTCTLayerNorm()
|
132 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
133 |
+
|
134 |
+
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
|
135 |
+
self.register_buffer(
|
136 |
+
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
|
137 |
+
)
|
138 |
+
self.register_buffer(
|
139 |
+
"token_type_ids",
|
140 |
+
torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),
|
141 |
+
persistent=False,
|
142 |
+
)
|
143 |
+
|
144 |
+
def forward(
|
145 |
+
self, input_features=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
|
146 |
+
):
|
147 |
+
input_shape = input_features.size() if input_features is not None else inputs_embeds.size()[:-1]
|
148 |
+
|
149 |
+
seq_length = input_shape[1]
|
150 |
+
|
151 |
+
if position_ids is None:
|
152 |
+
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
|
153 |
+
|
154 |
+
# Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
|
155 |
+
# when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
|
156 |
+
# issue #5664
|
157 |
+
if token_type_ids is None:
|
158 |
+
if hasattr(self, "token_type_ids"):
|
159 |
+
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
|
160 |
+
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
|
161 |
+
token_type_ids = buffered_token_type_ids_expanded
|
162 |
+
else:
|
163 |
+
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
|
164 |
+
|
165 |
+
if inputs_embeds is None:
|
166 |
+
inputs_embeds = self.word_embeddings(input_features)
|
167 |
+
|
168 |
+
token_type_embeddings = self.token_type_embeddings(token_type_ids)
|
169 |
+
|
170 |
+
embeddings = inputs_embeds + token_type_embeddings
|
171 |
+
|
172 |
+
embeddings = self.LayerNorm(embeddings)
|
173 |
+
embeddings = self.dropout(embeddings)
|
174 |
+
return embeddings
|
175 |
+
|
176 |
+
|
177 |
+
class MCTCTSelfAttention(nn.Module):
|
178 |
+
def __init__(self, config):
|
179 |
+
super().__init__()
|
180 |
+
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
|
181 |
+
raise ValueError(
|
182 |
+
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
|
183 |
+
f"heads ({config.num_attention_heads})"
|
184 |
+
)
|
185 |
+
|
186 |
+
self.num_attention_heads = config.num_attention_heads
|
187 |
+
self.attention_head_size = config.attention_head_dim
|
188 |
+
self.all_head_size = self.num_attention_heads * self.attention_head_size
|
189 |
+
|
190 |
+
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
|
191 |
+
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
|
192 |
+
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
|
193 |
+
|
194 |
+
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
|
195 |
+
|
196 |
+
self.max_position_embeddings = config.max_position_embeddings
|
197 |
+
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
|
198 |
+
|
199 |
+
self.is_decoder = config.is_decoder
|
200 |
+
|
201 |
+
def transpose_for_scores(self, x):
|
202 |
+
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
|
203 |
+
x = x.view(*new_x_shape)
|
204 |
+
return x.permute(0, 2, 1, 3)
|
205 |
+
|
206 |
+
def reshape_fortran(self, x, shape):
|
207 |
+
if len(x.shape) > 0:
|
208 |
+
x = x.permute(*reversed(range(len(x.shape))))
|
209 |
+
return x.reshape(*reversed(shape)).permute(*reversed(range(len(shape))))
|
210 |
+
|
211 |
+
def relative_position_embedding_rotate(self, scores):
|
212 |
+
# NOTE: should re-evaluate whether this re-implementation was truly necessary
|
213 |
+
# or the reason why my complete re-haul worked was due to some other part
|
214 |
+
# of the code. Adding this and the reshape fortrain code seems very undesirable.
|
215 |
+
scores = scores.permute(0, 2, 3, 1) # e.g. [10, 1839, 14, 4]
|
216 |
+
|
217 |
+
batch, hidden_state, seq_len, heads = scores.shape
|
218 |
+
|
219 |
+
# e.g. [10, 1853, 14, 4]
|
220 |
+
scores = torch.cat((scores, torch.zeros((batch, seq_len, seq_len, heads), device=scores.device)), dim=1)
|
221 |
+
|
222 |
+
# e.g. [10, 25942, 1, 4]
|
223 |
+
scores = self.reshape_fortran(scores, [batch, (hidden_state + seq_len) * seq_len, 1, heads])
|
224 |
+
|
225 |
+
# e.g. [10, 25928, 1, 4]
|
226 |
+
scores = scores[:, : (seq_len + hidden_state - 1) * seq_len]
|
227 |
+
|
228 |
+
# e.g. [10, 1852, 14, 4]
|
229 |
+
scores = self.reshape_fortran(scores, [batch, hidden_state + seq_len - 1, seq_len, heads])
|
230 |
+
|
231 |
+
halfpoint = hidden_state // 2
|
232 |
+
scores = scores[:, halfpoint : halfpoint + seq_len].transpose(1, 2) # e.g. [10, 14, 14, 4]
|
233 |
+
|
234 |
+
return scores.permute(0, 3, 1, 2)
|
235 |
+
|
236 |
+
def forward(
|
237 |
+
self,
|
238 |
+
hidden_states,
|
239 |
+
attention_mask=None,
|
240 |
+
head_mask=None,
|
241 |
+
output_attentions=False,
|
242 |
+
):
|
243 |
+
mixed_query_layer = self.query(hidden_states)
|
244 |
+
mixed_query_layer = mixed_query_layer / math.sqrt(self.attention_head_size)
|
245 |
+
|
246 |
+
key_layer = self.transpose_for_scores(self.key(hidden_states))
|
247 |
+
value_layer = self.transpose_for_scores(self.value(hidden_states))
|
248 |
+
|
249 |
+
query_layer = self.transpose_for_scores(mixed_query_layer)
|
250 |
+
|
251 |
+
# Take the dot product between "query" and "key" to get the raw attention scores.
|
252 |
+
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
|
253 |
+
|
254 |
+
# relative key position embeddings
|
255 |
+
positional_embedding = self.distance_embedding.weight
|
256 |
+
relative_position_scores = torch.einsum("lh, bche -> bcle", positional_embedding, query_layer.transpose(2, 3))
|
257 |
+
|
258 |
+
relative_position_scores = self.relative_position_embedding_rotate(relative_position_scores)
|
259 |
+
attention_scores = attention_scores + relative_position_scores
|
260 |
+
|
261 |
+
if attention_mask is not None:
|
262 |
+
# Apply the attention mask is (precomputed for all layers in MCTCTModel forward() function)
|
263 |
+
attention_scores = attention_scores + attention_mask
|
264 |
+
|
265 |
+
# Normalize the attention scores to probabilities.
|
266 |
+
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
|
267 |
+
|
268 |
+
# This is actually dropping out entire tokens to attend to, which might
|
269 |
+
# seem a bit unusual, but is taken from the original Transformer paper.
|
270 |
+
attention_probs = self.dropout(attention_probs)
|
271 |
+
|
272 |
+
# Mask heads if we want to
|
273 |
+
if head_mask is not None:
|
274 |
+
attention_probs = attention_probs * head_mask
|
275 |
+
|
276 |
+
context_layer = torch.matmul(attention_probs, value_layer)
|
277 |
+
|
278 |
+
context_layer = context_layer.permute(0, 2, 1, 3).flatten(start_dim=-2)
|
279 |
+
|
280 |
+
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
|
281 |
+
|
282 |
+
return outputs
|
283 |
+
|
284 |
+
|
285 |
+
class MCTCTLayerNorm(nn.Module):
|
286 |
+
def __init__(self):
|
287 |
+
super().__init__()
|
288 |
+
self.singleton_weight = nn.Parameter(torch.ones(1))
|
289 |
+
self.singleton_bias = nn.Parameter(torch.zeros(1))
|
290 |
+
|
291 |
+
def forward(self, hidden_states):
|
292 |
+
return (hidden_states * self.singleton_weight) + self.singleton_bias
|
293 |
+
|
294 |
+
|
295 |
+
class MCTCTSelfOutput(nn.Module):
|
296 |
+
def __init__(self, config):
|
297 |
+
super().__init__()
|
298 |
+
self.config = config
|
299 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
|
300 |
+
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
301 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
302 |
+
|
303 |
+
def forward(self, hidden_states, input_tensor):
|
304 |
+
hidden_states = self.dense(hidden_states)
|
305 |
+
hidden_states = self.dropout(hidden_states)
|
306 |
+
hidden_states = self.LayerNorm(hidden_states + input_tensor)
|
307 |
+
return hidden_states
|
308 |
+
|
309 |
+
|
310 |
+
class MCTCTAttention(nn.Module):
|
311 |
+
def __init__(self, config):
|
312 |
+
super().__init__()
|
313 |
+
self.self = MCTCTSelfAttention(config)
|
314 |
+
self.output = MCTCTSelfOutput(config)
|
315 |
+
self.pruned_heads = set()
|
316 |
+
|
317 |
+
def prune_heads(self, heads):
|
318 |
+
if len(heads) == 0:
|
319 |
+
return
|
320 |
+
heads, index = find_pruneable_heads_and_indices(
|
321 |
+
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
|
322 |
+
)
|
323 |
+
|
324 |
+
# Prune linear layers
|
325 |
+
self.self.query = prune_linear_layer(self.self.query, index)
|
326 |
+
self.self.key = prune_linear_layer(self.self.key, index)
|
327 |
+
self.self.value = prune_linear_layer(self.self.value, index)
|
328 |
+
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
|
329 |
+
|
330 |
+
# Update hyper params and store pruned heads
|
331 |
+
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
|
332 |
+
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
|
333 |
+
self.pruned_heads = self.pruned_heads.union(heads)
|
334 |
+
|
335 |
+
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
    ):
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them

        return outputs


class MCTCTIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class MCTCTOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class MCTCTLayer(nn.Module):
    def __init__(self, config: MCTCTConfig):
        super().__init__()

        self.seq_len_dim = 1
        self.chunk_size_feed_forward = config.chunk_size_feed_forward

        self.intermediate = MCTCTIntermediate(config)
        self.attention = MCTCTAttention(config)
        self.is_decoder = config.is_decoder
        self.output = MCTCTOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
    ):
        self_attention_outputs = self.attention(
            hidden_states, attention_mask, head_mask, output_attentions=output_attentions
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )

        outputs = (layer_output,) + outputs

        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
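For context (an illustration, not from this diff): `apply_chunking_to_forward` runs the feed-forward over slices of the sequence dimension to cap peak memory, and is numerically a no-op. A toy sketch with assumed shapes:

```python
import torch
from transformers.pytorch_utils import apply_chunking_to_forward

def feed_forward(x):
    return x * 2.0  # stand-in for intermediate + output

x = torch.randn(1, 12, 4)
# chunk_size=3 along seq_len_dim=1: four chunks of length 3, concatenated back
chunked = apply_chunking_to_forward(feed_forward, 3, 1, x)
assert torch.allclose(chunked, feed_forward(x))
```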
class MCTCTPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MCTCTConfig
    base_model_prefix = "mctct"
    main_input_name = "input_features"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, MCTCTLayerNorm):
            module.singleton_weight.data.fill_(1.0)
            module.singleton_bias.data.zero_()
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()

    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
        """
        Computes the output length of the convolutional layers
        """
        dilation = 1
        for _, kernel_sz, stride in zip(
            range(self.config.num_conv_layers), self.config.conv_kernel, self.config.conv_stride
        ):
            padding = kernel_sz // 2
            input_lengths = input_lengths + 2 * padding - dilation * (kernel_sz - 1) - 1
            input_lengths = torch.div(input_lengths, stride, rounding_mode="trunc") + 1

        return input_lengths
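A worked instance of the length arithmetic above (the standard Conv1d output-length formula with `padding = kernel // 2` and dilation 1); the kernel and stride values below are illustrative, not read from any config:

```python
def conv_out_len(length: int, kernel_sz: int, stride: int) -> int:
    padding = kernel_sz // 2
    return (length + 2 * padding - (kernel_sz - 1) - 1) // stride + 1

# e.g. 1000 input frames through a kernel-7, stride-3 layer:
print(conv_out_len(1000, kernel_sz=7, stride=3))  # 334
```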
    def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask):
        # generate creates 3D attention mask, because of the shape of input_features
        # convert it to 2D if that's the case
        if len(attention_mask.shape) > 2:
            attention_mask = attention_mask[:, :, -1]

        # subsampled_lengths = attention_mask.sum(-1)
        subsampled_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1))
        bsz = attention_mask.size()[0]
        attention_mask = torch.zeros(
            (bsz, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
        )

        # these two operations make sure that all values
        # before the output lengths indices are attended to
        attention_mask[(torch.arange(bsz, device=attention_mask.device), subsampled_lengths - 1)] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).long()
        return attention_mask
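The flip/cumsum/flip idiom above deserves a one-line demonstration (toy tensor, not from the diff): writing a 1 at the last valid index and accumulating from the right marks every earlier position as attended.

```python
import torch

mask = torch.zeros(1, 6, dtype=torch.long)
mask[0, 3] = 1  # subsampled length is 4, so index 3 is the last valid frame
print(mask.flip([-1]).cumsum(-1).flip([-1]))  # tensor([[1, 1, 1, 1, 0, 0]])
```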
MCTCT_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
    behavior.

    Parameters:
        config ([`MCTCTConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MCTCT_INPUTS_DOCSTRING = r"""
    Args:
        input_features (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`Wav2Vec2CTCTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""


class MCTCTEncoder(MCTCTPreTrainedModel):
    def __init__(self, config: MCTCTConfig):
        super().__init__(config)
        self.hidden_dropout_prob = config.hidden_dropout_prob

        self.layer_norm = MCTCTLayerNorm()
        self.conv = MCTCTConv1dSubsampler(config)
        self.layers = nn.ModuleList([MCTCTLayer(config) for _ in range(config.num_hidden_layers)])

        self.gradient_checkpointing = False

    def forward(
        self,
        input_features: torch.Tensor,
        attention_mask: torch.Tensor,
        head_mask: torch.Tensor,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[Tuple, BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        input_features = self.layer_norm(input_features)

        inputs_embeds = self.conv(input_features)

        # subsample attention mask if necessary
        if attention_mask is not None:
            attention_mask = self._get_feature_vector_attention_mask(inputs_embeds.shape[1], attention_mask)

        hidden_states = nn.functional.dropout(inputs_embeds, p=self.hidden_dropout_prob, training=self.training)

        # expand attention_mask
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            if head_mask.size()[0] != len(self.layers):
                raise ValueError(
                    f"The head_mask should be specified for {len(self.layers)} layers, "
                    f"but it is for {head_mask.size()[0]}."
                )

        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = torch.rand([])

            skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                if self.gradient_checkpointing and self.training:
                    layer_outputs = self._gradient_checkpointing_func(
                        encoder_layer.__call__,
                        hidden_states,
                        attention_mask,
                        (head_mask[idx] if head_mask is not None else None),
                        output_attentions,
                    )
                else:
                    layer_outputs = encoder_layer(
                        hidden_states=hidden_states,
                        attention_mask=attention_mask,
                        output_attentions=output_attentions,
                    )

                hidden_states = layer_outputs[0]

            if skip_the_layer:
                layer_outputs = (None, None)

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
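The LayerDrop logic in the loop above reduces to an independent per-layer Bernoulli draw at train time; a self-contained sketch with assumed values:

```python
import torch

layerdrop, training, num_layers = 0.3, True, 6
skipped = [training and torch.rand([]).item() < layerdrop for _ in range(num_layers)]
print(skipped)  # e.g. [False, True, False, False, True, False]
```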
@add_start_docstrings(
    "The bare M-CTC-T Model transformer outputting raw hidden-states without any specific head on top.",
    MCTCT_START_DOCSTRING,
)
class MCTCTModel(MCTCTPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = MCTCTEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="audio",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        input_features: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_features is None:
            raise ValueError("You have to specify input_features.")

        encoder_outputs = self.encoder(
            input_features,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]

        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
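A hedged end-to-end sketch of the bare model (the checkpoint name is the one the M-CTC-T documentation uses; the audio is random, purely to exercise shapes, and assumes the deprecated classes are still exported at the top level in this transformers version):

```python
import torch
from transformers import MCTCTFeatureExtractor, MCTCTModel

feature_extractor = MCTCTFeatureExtractor.from_pretrained("speechbrain/m-ctc-t-large")
model = MCTCTModel.from_pretrained("speechbrain/m-ctc-t-large")

waveform = torch.randn(16000).numpy()  # one second of fake 16 kHz audio
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (batch, subsampled_time, hidden_size)
```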
@add_start_docstrings(
    """MCTCT Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
    MCTCT_START_DOCSTRING,
)
class MCTCTForCTC(MCTCTPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.mctct = MCTCTModel(config)

        if config.vocab_size is None:
            raise ValueError(
                f"You are trying to instantiate {self.__class__} with a configuration that "
                "does not define the vocabulary size of the language model head. Please "
                "instantiate the model as follows: `MCTCTForCTC.from_pretrained(..., vocab_size=vocab_size)`, "
                "or define `vocab_size` of your model's configuration."
            )
        output_hidden_size = config.hidden_size

        self.ctc_head = nn.Linear(output_hidden_size, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_CTC_EXPECTED_OUTPUT,
        expected_loss=_CTC_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_features: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, CausalLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
            Labels for connectionist temporal classification. Note that `target_length` has to be smaller than or
            equal to the sequence length of the output logits. Indices are selected in
            `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only
            computed for labels in `[0, ..., config.vocab_size - 1]`.
        """

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mctct(
            input_features,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]

        logits = self.ctc_head(hidden_states)

        loss = None
        if labels is not None:
            if labels.max() >= self.config.vocab_size:
                raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")

            # retrieve loss input_lengths from attention_mask
            attention_mask = (
                attention_mask
                if attention_mask is not None
                else torch.ones(input_features.shape[:-1], dtype=torch.long)
            )
            input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
            # assuming that padded tokens are filled with -100
            # when not being attended to
            labels_mask = labels >= 0
            target_lengths = labels_mask.sum(-1)
            flattened_targets = labels.masked_select(labels_mask)

            # ctc_loss doesn't support fp16
            log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)

            with torch.backends.cudnn.flags(enabled=False):
                loss = nn.functional.ctc_loss(
                    log_probs,
                    flattened_targets,
                    input_lengths,
                    target_lengths,
                    blank=self.config.pad_token_id,
                    reduction=self.config.ctc_loss_reduction,
                    zero_infinity=self.config.ctc_zero_infinity,
                )

        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
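To make the label plumbing in `forward` concrete, here is a toy replay of the masking and loss call (shapes and label values are made up; `blank`, the reduction, and `zero_infinity` come from the config in the real code):

```python
import torch
from torch import nn

logits = torch.randn(2, 50, 32)                       # (batch, time, vocab)
labels = torch.tensor([[5, 9, 11, -100], [7, 2, -100, -100]])

labels_mask = labels >= 0
target_lengths = labels_mask.sum(-1)                  # tensor([3, 2])
flattened_targets = labels.masked_select(labels_mask)

# ctc_loss wants (time, batch, vocab) log-probabilities
log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
input_lengths = torch.tensor([50, 50])
loss = nn.functional.ctc_loss(log_probs, flattened_targets, input_lengths, target_lengths, blank=0)
print(loss)
```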
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/processing_mctct.py
ADDED
@@ -0,0 +1,142 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Speech processor class for M-CTC-T
"""
import warnings
from contextlib import contextmanager

from ....processing_utils import ProcessorMixin


class MCTCTProcessor(ProcessorMixin):
    r"""
    Constructs a MCTCT processor which wraps a MCTCT feature extractor and a MCTCT tokenizer into a single processor.

    [`MCTCTProcessor`] offers all the functionalities of [`MCTCTFeatureExtractor`] and [`AutoTokenizer`]. See the
    [`~MCTCTProcessor.__call__`] and [`~MCTCTProcessor.decode`] for more information.

    Args:
        feature_extractor (`MCTCTFeatureExtractor`):
            An instance of [`MCTCTFeatureExtractor`]. The feature extractor is a required input.
        tokenizer (`AutoTokenizer`):
            An instance of [`AutoTokenizer`]. The tokenizer is a required input.
    """

    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """
        When used in normal mode, this method forwards all its arguments to MCTCTFeatureExtractor's
        [`~MCTCTFeatureExtractor.__call__`] and returns its output. If used in the context
        [`~MCTCTProcessor.as_target_processor`] this method forwards all its arguments to AutoTokenizer's
        [`~AutoTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to AutoTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer
        to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        """
        When used in normal mode, this method forwards all its arguments to MCTCTFeatureExtractor's
        [`~MCTCTFeatureExtractor.pad`] and returns its output. If used in the context
        [`~MCTCTProcessor.as_target_processor`] this method forwards all its arguments to PreTrainedTokenizer's
        [`~PreTrainedTokenizer.pad`]. Please refer to the docstring of the above two methods for more information.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to AutoTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the
        docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """
        Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning MCTCT.
        """
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call)."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
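A hedged usage sketch for the processor (checkpoint name as in the M-CTC-T docs; random audio): passing `text=` in the same call routes it through the tokenizer and stores the ids under `labels`, which is the replacement for the deprecated `as_target_processor` context manager.

```python
import numpy as np
from transformers import MCTCTProcessor

processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
audio = np.random.randn(16000).astype(np.float32)

batch = processor(audio=audio, sampling_rate=16000, text="hello world", return_tensors="pt")
print(sorted(batch.keys()))  # typically attention_mask, input_features, labels
```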
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__init__.py
ADDED
@@ -0,0 +1,45 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
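For orientation (not part of the diff): the `_LazyModule` indirection above makes importing the package nearly free; the heavy submodule is only executed when an attribute is first touched. A quick sketch:

```python
import importlib

mmbt = importlib.import_module("transformers.models.deprecated.mmbt")
# Only this attribute access triggers the actual import of configuration_mmbt:
config_cls = mmbt.MMBTConfig
print(config_cls.__name__)  # MMBTConfig
```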
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (811 Bytes).

llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/configuration_mmbt.cpython-310.pyc
ADDED
Binary file (1.33 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/modeling_mmbt.cpython-310.pyc
ADDED
Binary file (14.7 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/configuration_mmbt.py
ADDED
@@ -0,0 +1,42 @@
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" MMBT configuration"""

from ....utils import logging


logger = logging.get_logger(__name__)


class MMBTConfig(object):
    """
    This is the configuration class to store the configuration of a [`MMBTModel`]. It is used to instantiate a MMBT
    model according to the specified arguments, defining the model architecture.

    Args:
        config ([`PreTrainedConfig`]):
            Config of the underlying Transformer models. Its values are copied over to use a single config.
        num_labels (`int`, *optional*):
            Size of final Linear layer for classification.
        modal_hidden_size (`int`, *optional*, defaults to 2048):
            Embedding dimension of the non-text modality encoder.
    """

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
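Because `MMBTConfig.__init__` aliases the wrapped config's `__dict__`, every attribute of the text config shows through on the MMBT config; a short sketch (BertConfig chosen here as an assumed text backbone, not mandated by the diff):

```python
from transformers import BertConfig
from transformers.models.deprecated.mmbt import MMBTConfig

text_config = BertConfig()
config = MMBTConfig(text_config, num_labels=2, modal_hidden_size=2048)
print(config.hidden_size, config.modal_hidden_size, config.num_labels)  # 768 2048 2
```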
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/modeling_mmbt.py
ADDED
@@ -0,0 +1,408 @@
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch MMBT model."""


import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from ....modeling_outputs import BaseModelOutputWithPooling, SequenceClassifierOutput
from ....modeling_utils import ModuleUtilsMixin
from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "MMBTConfig"


class ModalEmbeddings(nn.Module):
    """Generic Modal Embeddings which takes in an encoder, and a transformer embedding."""

    def __init__(self, config, encoder, embeddings):
        super().__init__()
        self.config = config
        self.encoder = encoder
        self.proj_embeddings = nn.Linear(config.modal_hidden_size, config.hidden_size)
        self.position_embeddings = embeddings.position_embeddings
        self.token_type_embeddings = embeddings.token_type_embeddings
        self.word_embeddings = embeddings.word_embeddings
        self.LayerNorm = embeddings.LayerNorm
        self.dropout = nn.Dropout(p=config.hidden_dropout_prob)

    def forward(self, input_modal, start_token=None, end_token=None, position_ids=None, token_type_ids=None):
        token_embeddings = self.proj_embeddings(self.encoder(input_modal))
        seq_length = token_embeddings.size(1)

        if start_token is not None:
            start_token_embeds = self.word_embeddings(start_token)
            seq_length += 1
            token_embeddings = torch.cat([start_token_embeds.unsqueeze(1), token_embeddings], dim=1)

        if end_token is not None:
            end_token_embeds = self.word_embeddings(end_token)
            seq_length += 1
            token_embeddings = torch.cat([token_embeddings, end_token_embeds.unsqueeze(1)], dim=1)

        if position_ids is None:
            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_modal.device)
            position_ids = position_ids.unsqueeze(0).expand(input_modal.size(0), seq_length)

        if token_type_ids is None:
            token_type_ids = torch.zeros(
                (input_modal.size(0), seq_length), dtype=torch.long, device=input_modal.device
            )

        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = token_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
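A shape walk-through of the forward above (toy numbers; the projection output is simulated rather than produced by a real encoder): the modal encoder yields `(batch, k, modal_hidden_size)`, `proj_embeddings` maps it to `hidden_size`, and optional start/end tokens each extend the sequence by one before position and token-type embeddings are added.

```python
import torch

batch, k, hidden = 2, 3, 768
token_embeddings = torch.randn(batch, k, hidden)   # as if after proj_embeddings
start_token_embeds = torch.randn(batch, hidden)    # word embedding of e.g. [CLS]

token_embeddings = torch.cat([start_token_embeds.unsqueeze(1), token_embeddings], dim=1)
print(token_embeddings.shape)  # torch.Size([2, 4, 768])
```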
MMBT_START_DOCSTRING = r"""
    MMBT model was proposed in [Supervised Multimodal Bitransformers for Classifying Images and
    Text](https://github.com/facebookresearch/mmbt) by Douwe Kiela, Suvrat Bhooshan, Hamed Firooz, Davide Testuggine.
    It's a supervised multimodal bitransformer model that fuses information from text and other image encoders, and
    obtains state-of-the-art performance on various multimodal classification benchmark tasks.

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`MMBTConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration.
        transformer (`nn.Module`): A text transformer that is used by MMBT.
            It should have embeddings, encoder, and pooler attributes.
        encoder (`nn.Module`): Encoder for the second modality.
            It should take in a batch of modal inputs and return k, n dimension embeddings.
"""

MMBT_INPUTS_DOCSTRING = r"""
    Args:
        input_modal (`torch.FloatTensor` of shape `(batch_size, ***)`):
            The other modality data. It will be the shape that the encoder for that type expects. e.g. With an Image
            Encoder, the shape would be (batch_size, channels, height, width)
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. It does not expect [CLS] token to be added as it's
            appended to the end of other modality embeddings. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        modal_start_tokens (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Optional start token to be added to Other Modality Embedding. [CLS] Most commonly used for classification
            tasks.
        modal_end_tokens (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Optional end token to be added to Other Modality Embedding. [SEP] Most commonly used.
        attention_mask (*optional*) `torch.FloatTensor` of shape `(batch_size, sequence_length)`:
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (*optional*) `torch.LongTensor` of shape `(batch_size, sequence_length)`:
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        modal_token_type_ids (*optional*) `torch.LongTensor` of shape `(batch_size, modal_sequence_length)`:
            Segment token indices to indicate different portions of the non-text modality. The embeddings from these
            tokens will be summed with the respective token embeddings for the non-text modality.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        modal_position_ids (`torch.LongTensor` of shape `(batch_size, modal_sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings for the non-text modality.
            Selected in the range `[0, config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, embedding_dim)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare MMBT Model outputting raw hidden-states without any specific head on top.",
    MMBT_START_DOCSTRING,
)
class MMBTModel(nn.Module, ModuleUtilsMixin):
    def __init__(self, config, transformer, encoder):
        super().__init__()
        self.config = config
        self.transformer = transformer
        self.modal_encoder = ModalEmbeddings(config, encoder, transformer.embeddings)

    @add_start_docstrings_to_model_forward(MMBT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_modal,
        input_ids=None,
        modal_start_tokens=None,
        modal_end_tokens=None,
        attention_mask=None,
        token_type_ids=None,
        modal_token_type_ids=None,
        position_ids=None,
        modal_position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Returns:

        Examples:

        ```python
        # For example purposes. Not runnable.
        transformer = BertModel.from_pretrained("google-bert/bert-base-uncased")
        encoder = ImageEncoder(args)
        mmbt = MMBTModel(config, transformer, encoder)
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_txt_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_txt_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        modal_embeddings = self.modal_encoder(
            input_modal,
            start_token=modal_start_tokens,
            end_token=modal_end_tokens,
            position_ids=modal_position_ids,
            token_type_ids=modal_token_type_ids,
        )

        input_modal_shape = modal_embeddings.size()[:-1]

        if token_type_ids is None:
            token_type_ids = torch.ones(input_txt_shape, dtype=torch.long, device=device)

        txt_embeddings = self.transformer.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )

        embedding_output = torch.cat([modal_embeddings, txt_embeddings], 1)

        input_shape = embedding_output.size()[:-1]

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        else:
            attention_mask = torch.cat(
                [torch.ones(input_modal_shape, device=device, dtype=torch.long), attention_mask], dim=1
            )
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        else:
            encoder_attention_mask = torch.cat(
                [torch.ones(input_modal_shape, device=device), encoder_attention_mask], dim=1
            )

        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
        encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        encoder_outputs = self.transformer.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = encoder_outputs[0]
        pooled_output = self.transformer.pooler(sequence_output)

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
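The mask handling in `MMBTModel.forward` is easy to trip over, so a toy sketch (assumed lengths): a block of ones covering the modal positions is prepended to the user-supplied text mask before the mask is extended to 4D.

```python
import torch

modal_len = 4
txt_mask = torch.tensor([[1, 1, 1, 0]])  # last text token is padding
attention_mask = torch.cat(
    [torch.ones(1, modal_len, dtype=torch.long), txt_mask], dim=1
)
print(attention_mask)  # tensor([[1, 1, 1, 1, 1, 1, 1, 0]])
```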
@add_start_docstrings(
    """
    MMBT Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)
    """,
    MMBT_START_DOCSTRING,
    MMBT_INPUTS_DOCSTRING,
)
class MMBTForClassification(nn.Module):
    r"""
    **labels**: (*optional*) `torch.LongTensor` of shape `(batch_size,)`:
        Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
        config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
        `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

    Returns: *Tuple* comprising various elements depending on the configuration (config) and inputs: **loss**:
    (*optional*, returned when `labels` is provided) `torch.FloatTensor` of shape `(1,)`: Classification (or
    regression if config.num_labels==1) loss. **logits**:
        `torch.FloatTensor` of shape `(batch_size, config.num_labels)` Classification (or regression if
        config.num_labels==1) scores (before SoftMax).
    **hidden_states**: (*optional*, returned when `output_hidden_states=True`) list of `torch.FloatTensor` (one for
    the output of each layer + the output of the embeddings) of shape `(batch_size, sequence_length, hidden_size)`:
    Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**:
    (*optional*, returned when `output_attentions=True`) list of `torch.FloatTensor` (one for each layer) of shape
    `(batch_size, num_heads, sequence_length, sequence_length)`: Attentions weights after the attention softmax, used
    to compute the weighted average in the self-attention heads.

    Examples:

    ```python
    # For example purposes. Not runnable.
    transformer = BertModel.from_pretrained("google-bert/bert-base-uncased")
    encoder = ImageEncoder(args)
    model = MMBTForClassification(config, transformer, encoder)
    outputs = model(input_modal, input_ids, labels=labels)
    loss, logits = outputs[:2]
    ```"""

    def __init__(self, config, transformer, encoder):
        super().__init__()
        self.num_labels = config.num_labels

        self.mmbt = MMBTModel(config, transformer, encoder)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(
        self,
        input_modal,
        input_ids=None,
        modal_start_tokens=None,
        modal_end_tokens=None,
        attention_mask=None,
        token_type_ids=None,
        modal_token_type_ids=None,
        position_ids=None,
        modal_position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mmbt(
            input_modal=input_modal,
            input_ids=input_ids,
            modal_start_tokens=modal_start_tokens,
            modal_end_tokens=modal_end_tokens,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            modal_token_type_ids=modal_token_type_ids,
            position_ids=position_ids,
            modal_position_ids=modal_position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
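The loss dispatch at the end mirrors the usual HF pattern; a standalone illustration with made-up logits:

```python
import torch
from torch.nn import CrossEntropyLoss, MSELoss

# num_labels > 1: classification
logits, labels = torch.randn(4, 3), torch.tensor([0, 2, 1, 2])
print(CrossEntropyLoss()(logits.view(-1, 3), labels.view(-1)))

# num_labels == 1: regression
reg_logits, reg_labels = torch.randn(4, 1), torch.randn(4)
print(MSELoss()(reg_logits.view(-1), reg_labels.view(-1)))
```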
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__init__.py
ADDED
@@ -0,0 +1,29 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ....utils import _LazyModule


_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer


else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (491 Bytes).

llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/tokenization_tapex.cpython-310.pyc
ADDED
Binary file (41.6 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/tokenization_tapex.py
ADDED
@@ -0,0 +1,1467 @@
# coding=utf-8
# Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for TAPEX."""

import json
import os
import random
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ....file_utils import ExplicitEnum, PaddingStrategy, TensorType, add_end_docstrings, is_pandas_available
from ....tokenization_utils import AddedToken, PreTrainedTokenizer
from ....tokenization_utils_base import ENCODE_KWARGS_DOCSTRING, BatchEncoding, TextInput, TruncationStrategy
from ....utils import logging


if is_pandas_available():
    import pandas as pd


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}


class TapexTruncationStrategy(ExplicitEnum):
    """
    Possible values for the `truncation` argument in [`~TapexTokenizer.__call__`]. Useful for tab-completion in an
    IDE.
    """

    DROP_ROWS_TO_FIT = "drop_rows_to_fit"


TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
            add_special_tokens (`bool`, *optional*, defaults to `True`):
                Whether or not to encode the sequences with the special tokens relative to their model.
            padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
                Activates and controls padding. Accepts the following values:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
                  different lengths).
            truncation (`bool`, `str`, [`TapexTruncationStrategy`] or [`~tokenization_utils_base.TruncationStrategy`],
                *optional*, defaults to `False`):
                Activates and controls truncation. Accepts the following values:

                - `'drop_rows_to_fit'`: Truncate to a maximum length specified with the argument `max_length` or to
                  the maximum acceptable input length for the model if that argument is not provided. This will
                  truncate row by row, removing rows from the table.
                - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length`
                  or to the maximum acceptable input length for the model if that argument is not provided. This will
                  truncate token by token, removing a token from the longest sequence in the pair if a pair of
                  sequences (or a batch of pairs) is provided.
                - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence
                  lengths greater than the model maximum admissible input size).
            max_length (`int`, *optional*):
                Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set
                to `None`, this will use the predefined model maximum length if a maximum length is required by one of
                the truncation/padding parameters. If the model has no specific maximum input length (like XLNet)
                truncation/padding to a maximum length will be deactivated.
            stride (`int`, *optional*, defaults to 0):
                If set to a number along with `max_length`, the overflowing tokens returned when
                `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
                returned to provide some overlap between truncated and overflowing sequences. The value of this
                argument defines the number of overlapping tokens.
            pad_to_multiple_of (`int`, *optional*):
                If set, will pad the sequence to a multiple of the provided value. This is especially useful to enable
                the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
            return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
                If set, will return tensors instead of a list of python integers. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
"""


@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping of utf-8 bytes to unicode strings. We specifically avoid mapping to whitespace/control
    characters that the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a
    large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token
    dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say,
    32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
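

# Illustrative sketch (not part of the original file): a minimal, hypothetical
# check of the table built by `bytes_to_unicode` above. Printable bytes map to
# themselves, while bytes the BPE would choke on (e.g. the space byte) are
# shifted into unused code points such as "Ġ" (U+0120).
def _demo_bytes_to_unicode():
    mapping = bytes_to_unicode()
    assert mapping[ord("A")] == "A"  # printable ASCII is kept as-is
    assert mapping[ord(" ")] == "Ġ"  # space is remapped past the 256 raw bytes
    return mapping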


def get_pairs(word):
    """
    Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length
    strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
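

# Illustrative sketch (not part of the original file): `get_pairs` feeds the
# BPE loop further below; for a toy symbol tuple it yields each adjacent pair
# exactly once.
def _demo_get_pairs():
    pairs = get_pairs(("h", "e", "l", "l", "o"))
    assert pairs == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}
    return pairs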


class IndexedRowTableLinearize:
    """
    FORMAT: col: col1 | col2 | col 3 row 1 : val1 | val2 | val3 row 2 : ...
    """

    def process_table(self, table_content: Dict):
        """
        Given a table, TableLinearize aims at converting it into a flattened sequence with special symbols.
        """
        assert "header" in table_content and "rows" in table_content, (
            "The table content should contain both 'header' and 'rows' keys."
        )
        # process header
        table_str = self.process_header(table_content["header"]) + " "
        # process rows
        for i, row_example in enumerate(table_content["rows"]):
            # NOTE: the row should start from row 1 instead of 0
            table_str += self.process_row(row_example, row_index=i + 1) + " "
        return table_str.strip()

    def process_header(self, headers: List):
        """
        Given a list of headers, TableLinearize aims at converting it into a flattened sequence with special symbols.
        """
        return "col : " + " | ".join(headers)

    def process_row(self, row: List, row_index: int):
        """
        Given a row, TableLinearize aims at converting it into a flattened sequence with special symbols.
        """
        row_str = ""
        row_cell_values = []
        for cell_value in row:
            if isinstance(cell_value, int):
                row_cell_values.append(str(cell_value))
            else:
                row_cell_values.append(cell_value)
        row_str += " | ".join(row_cell_values)
        return "row " + str(row_index) + " : " + row_str
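

# Illustrative sketch (not part of the original file): the linearizer turns a
# table dict into the flat "col : ... row 1 : ..." string described in the
# class docstring above.
def _demo_table_linearization():
    linearizer = IndexedRowTableLinearize()
    table_content = {"header": ["year", "city"], "rows": [[2008, "Beijing"], [2012, "London"]]}
    flattened = linearizer.process_table(table_content)
    assert flattened == "col : year | city row 1 : 2008 | Beijing row 2 : 2012 | London"
    return flattened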


class TapexTokenizer(PreTrainedTokenizer):
    r"""
    Construct a TAPEX tokenizer. Based on byte-level Byte-Pair-Encoding (BPE).

    This tokenizer can be used to flatten one or more table(s) and concatenate them with one or more related sentences
    to be used by TAPEX models. The format that the TAPEX tokenizer creates is the following:

    sentence col: col1 | col2 | col 3 row 1 : val1 | val2 | val3 row 2 : ...

    The tokenizer supports a single table + single query, a single table and multiple queries (in which case the table
    will be duplicated for every query), a single query and multiple tables (in which case the query will be
    duplicated for every table), and multiple tables and queries. In other words, you can provide a batch of tables +
    questions to the tokenizer, for instance to prepare them for the model.

    Tokenization itself is based on the BPE algorithm. It is identical to the one used by BART, RoBERTa and GPT-2.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows the leading word to be treated like any
            other word. (The BART tokenizer detects the beginning of words by the preceding space.)
        max_cell_length (`int`, *optional*, defaults to 15):
            Maximum number of characters per cell when linearizing a table. If this number is exceeded, truncation
            takes place.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        do_lower_case=True,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        max_cell_length=15,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        self.do_lower_case = do_lower_case

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

        # additional properties

        super().__init__(
            vocab_file=vocab_file,
            merges_file=merges_file,
            do_lower_case=do_lower_case,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            max_cell_length=max_cell_length,
            **kwargs,
        )

        self.max_cell_length = max_cell_length
        self.table_linearize = IndexedRowTableLinearize()

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A TAPEX sequence has the following format:
        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s></s> B </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
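
    # Illustrative sketch (not part of the original file): with hypothetical
    # ids cls=0, sep=2, token_ids_0=[7, 8], token_ids_1=[9]:
    #   build_inputs_with_special_tokens([7, 8])      -> [0, 7, 8, 2]
    #   build_inputs_with_special_tokens([7, 8], [9]) -> [0, 7, 8, 2, 2, 9, 2]
    # i.e. <s> A </s> and <s> A </s></s> B </s>, so a pair always adds exactly
    # four special tokens.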

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. TAPEX does not
        make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
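
    # Illustrative sketch (not part of the original file): a toy trace of the
    # merge loop above with hypothetical ranks {("l", "o"): 0, ("lo", "w"): 1}
    # for the token "low":
    #   ("l", "o", "w") -> ("lo", "w") -> ("low",)
    # so bpe("low") returns "low"; with no matching ranks the loop exits
    # immediately and "l o w" is returned.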

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
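
    # Illustrative sketch (not part of the original file): the byte-level
    # encoding is lossless, so for any loaded tokenizer `tok` the round trip
    # below holds (the space in " world" survives as "Ġ"):
    #   tok.convert_tokens_to_string(tok._tokenize("hello world")) == "hello world"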

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
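
    # Illustrative sketch (not part of the original file): assuming `out_dir`
    # already exists, saving writes the two files named in VOCAB_FILES_NAMES:
    #   tok.save_vocabulary("out_dir")  # -> ("out_dir/vocab.json", "out_dir/merges.txt")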

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def __call__(
        self,
        table: Union["pd.DataFrame", List["pd.DataFrame"]] = None,
        query: Optional[Union[TextInput, List[TextInput]]] = None,
        answer: Union[str, List[str]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Main method to tokenize and prepare for the model one or several table-sequence pair(s).

        Args:
            table (`pd.DataFrame`, `List[pd.DataFrame]`):
                Table(s) containing tabular data.
            query (`str` or `List[str]`, *optional*):
                Sentence or batch of sentences related to one or more table(s) to be encoded. Note that the number of
                sentences must match the number of tables.
            answer (`str` or `List[str]`, *optional*):
                Optionally, the corresponding answer to the questions as supervision.
        """

        if table is not None:
            return self.source_call_func(
                table=table,
                query=query,
                answer=answer,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
        elif answer is not None:
            return self.target_call_func(
                answer=answer,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
        else:
            raise ValueError("You need to provide either a `table` or an `answer`.")
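
    # Illustrative sketch (not part of the original file): typical use of
    # `__call__`, assuming pandas and a published checkpoint such as
    # "microsoft/tapex-base":
    #   import pandas as pd
    #   from transformers import TapexTokenizer
    #   tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")
    #   table = pd.DataFrame({"year": [2008, 2012], "city": ["Beijing", "London"]})
    #   encoding = tokenizer(table=table, query="where were the games held in 2012?")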

    def source_call_func(
        self,
        table: Union["pd.DataFrame", List["pd.DataFrame"]],
        query: Optional[Union[TextInput, List[TextInput]]] = None,
        answer: Union[str, List[str]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        # Input type checking for clearer error
        valid_table = False
        valid_query = False

        # Check that table has a valid type
        if isinstance(table, pd.DataFrame):
            valid_table = True
        elif isinstance(table, (list, tuple)) and isinstance(table[0], pd.DataFrame):
            valid_table = True

        # Check that query has a valid type
        if query is None or isinstance(query, str):
            valid_query = True
        elif isinstance(query, (list, tuple)):
            if len(query) == 0 or isinstance(query[0], str):
                valid_query = True

        if not valid_table:
            raise ValueError(
                "table input must be of type `pd.DataFrame` (single example) or `List[pd.DataFrame]` (batch of"
                " examples)."
            )
        if not valid_query:
            raise ValueError("query input must be of type `str` (single example) or `List[str]` (batch of examples).")

        is_batched = isinstance(table, (list, tuple)) or isinstance(query, (list, tuple))

        if is_batched:
            return self.batch_encode_plus(
                table=table,
                query=query,
                answer=answer,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
        else:
            return self.encode_plus(
                table=table,
                query=query,
                answer=answer,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def batch_encode_plus(
        self,
        table: Union["pd.DataFrame", List["pd.DataFrame"]],
        query: Optional[List[TextInput]] = None,
        answer: Optional[List[str]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str] = None,
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        <Tip warning={true}>

        This method is deprecated, `__call__` should be used instead.

        </Tip>
        """
        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        return self._batch_encode_plus(
            table=table,
            query=query,
            answer=answer,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

    def _batch_encode_plus(
        self,
        table: Union["pd.DataFrame", List["pd.DataFrame"]],
        query: Optional[List[TextInput]] = None,
        answer: Optional[List[str]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        if return_offsets_mapping:
            raise NotImplementedError(
                "return_offset_mapping is not available when using Python tokenizers. "
                "To use this feature, change your tokenizer to one deriving from "
                "transformers.PreTrainedTokenizerFast."
            )

        if isinstance(table, pd.DataFrame) and isinstance(query, (list, tuple)):
            # single table, many queries case
            # duplicate table for every query
            table = [table] * len(query)
        if isinstance(table, (list, tuple)) and isinstance(query, str):
            # many tables, single query case
            # duplicate query for every table
            query = [query] * len(table)

        batch_outputs = self._batch_prepare_for_model(
            table=table,
            query=query,
            answer=answer,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_length=return_length,
            return_tensors=return_tensors,
            verbose=verbose,
        )

        return BatchEncoding(batch_outputs)
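
    # Illustrative sketch (not part of the original file): one table with
    # several queries is expanded above by duplicating the table, so
    #   tok(table=table, query=["q1", "q2"], padding=True)
    # yields a batch of two encoded sequences (and symmetrically for many
    # tables with a single query).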

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def _batch_prepare_for_model(
        self,
        table: Union["pd.DataFrame", List["pd.DataFrame"]],
        query: Optional[Union[TextInput, List[TextInput]]] = None,
        answer: Optional[Union[str, List[str]]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_length: bool = False,
        verbose: bool = True,
    ) -> BatchEncoding:
        """
        This method adds special tokens, truncates sequences if overflowing while taking into account the special
        tokens and manages a moving window (with user defined stride) for overflowing tokens.
        """
        batch_outputs = {}
        if answer is None:
            answer = [None] * len(table)
        for _table, _query, _answer in zip(table, query, answer):
            text = self.prepare_table_query(
                _table, _query, _answer, truncation_strategy=truncation_strategy, max_length=max_length
            )

            if self.do_lower_case:
                text = text.lower()

            tokens = self.tokenize(text)
            outputs = self.prepare_for_model(
                ids=self.convert_tokens_to_ids(tokens),
                add_special_tokens=add_special_tokens,
                padding=PaddingStrategy.DO_NOT_PAD.value,  # we pad in batch afterwards
                truncation=truncation_strategy.value,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=None,  # we pad in batch afterwards
                return_attention_mask=False,  # we pad in batch afterwards
                return_token_type_ids=return_token_type_ids,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_length=return_length,
                return_tensors=None,  # We convert the whole batch to tensors at the end
                prepend_batch_axis=False,
                verbose=verbose,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                batch_outputs[key].append(value)

        batch_outputs = self.pad(
            batch_outputs,
            padding=padding_strategy.value,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)

        return batch_outputs

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING)
    def encode(
        self,
        table: "pd.DataFrame",
        query: Optional[TextInput] = None,
        answer: Optional[str] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy, TapexTruncationStrategy] = None,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> List[int]:
        """
        Prepare a table, a string and a possible answer for the model. This method does not return token type IDs,
        attention masks, etc. which are necessary for the model to work correctly. Use this method if you want to
        build your processing on your own, otherwise refer to `__call__`.
        """
        encoded_inputs = self.encode_plus(
            table,
            query=query,
            answer=answer,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            return_tensors=return_tensors,
            **kwargs,
        )

        return encoded_inputs["input_ids"]

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def encode_plus(
        self,
        table: "pd.DataFrame",
        query: Optional[TextInput] = None,
        answer: Optional[str] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str] = None,
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        return self._encode_plus(
            table=table,
            query=query,
            answer=answer,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

    def _encode_plus(
        self,
        table: "pd.DataFrame",
        query: Optional[TextInput] = None,
        answer: Optional[str] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        if return_offsets_mapping:
            raise NotImplementedError(
                "return_offset_mapping is not available when using Python tokenizers. "
                "To use this feature, change your tokenizer to one deriving from "
                "transformers.PreTrainedTokenizerFast. "
                "More information on available tokenizers at "
                "https://github.com/huggingface/transformers/pull/2674"
            )

        text = self.prepare_table_query(
            table, query, answer, truncation_strategy=truncation_strategy, max_length=max_length
        )

        # if necessary, perform lower case
        if self.do_lower_case:
            text = text.lower()

        tokens = self.tokenize(text)

        return self.prepare_for_model(
            ids=self.convert_tokens_to_ids(tokens),
            add_special_tokens=add_special_tokens,
            padding=padding_strategy.value,
            truncation=truncation_strategy.value,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            prepend_batch_axis=True,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_length=return_length,
            verbose=verbose,
        )

    def target_call_func(
        self,
        answer: Union[str, List[str]],
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        The method tokenizes and prepares the answer label for the model.

        Args:
            answer (`str` or `List[str]`):
                Corresponding answer supervision to the queries for training the model.
        """
        is_batched = isinstance(answer, (list, tuple))

        if is_batched:
            return self.target_batch_encode_plus(
                answer=answer,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
        else:
            return self.target_encode_plus(
                answer=answer,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )

    def target_batch_encode_plus(
        self,
        answer: List[str],
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str] = None,
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepare answer strings for the model.

        Args:
            answer (`List[str]`):
                Corresponding answer supervision to the queries for training the model.
        """
        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        return self._target_batch_encode_plus(
            answer=answer,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

    def _target_batch_encode_plus(
        self,
        answer: List[str],
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        batch_outputs = {}
        for text in answer:
            if self.do_lower_case:
                text = text.lower()

            tokens = self.tokenize(text)
            outputs = self.prepare_for_model(
                ids=self.convert_tokens_to_ids(tokens),
                add_special_tokens=add_special_tokens,
                padding=PaddingStrategy.DO_NOT_PAD.value,  # we pad in batch afterwards
                truncation=truncation_strategy.value,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=None,  # we pad in batch afterwards
                return_attention_mask=False,  # we pad in batch afterwards
                return_token_type_ids=return_token_type_ids,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_length=return_length,
                return_tensors=None,  # We convert the whole batch to tensors at the end
                prepend_batch_axis=False,
                verbose=verbose,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                batch_outputs[key].append(value)

        batch_outputs = self.pad(
            batch_outputs,
            padding=padding_strategy.value,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)

        return batch_outputs
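
    # Illustrative sketch (not part of the original file): answers alone (no
    # table) route through the target_* path above, e.g. to build decoder
    # labels:
    #   labels = tok(answer=["london", "beijing"], padding=True)["input_ids"]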

    def target_encode(
        self,
        answer: str,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy, TapexTruncationStrategy] = None,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> List[int]:
        """
        Prepare the answer string for the model. This method does not return token type IDs, attention masks, etc.
        which are necessary for the model to work correctly. Use this method if you want to build your processing on
        your own, otherwise refer to `__call__`.

        Args:
            answer (`str`):
                Corresponding answer supervision to the queries for training the model.
        """
        encoded_outputs = self.target_encode_plus(
            answer=answer,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            return_tensors=return_tensors,
            **kwargs,
        )

        return encoded_outputs["input_ids"]

    def target_encode_plus(
        self,
        answer: str,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str] = None,
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepare an answer string for the model.

        Args:
            answer (`str`):
                Corresponding answer supervision to the queries for training the model.
        """
        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        return self._target_encode_plus(
            answer=answer,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

    def _target_encode_plus(
        self,
        answer: str,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        if return_offsets_mapping:
            raise NotImplementedError(
                "return_offset_mapping is not available when using Python tokenizers. "
                "To use this feature, change your tokenizer to one deriving from "
                "transformers.PreTrainedTokenizerFast. "
                "More information on available tokenizers at "
                "https://github.com/huggingface/transformers/pull/2674"
            )

        text = answer

        # if necessary, perform lower case
        if self.do_lower_case:
            text = text.lower()

        tokens = self.tokenize(text)

        return self.prepare_for_model(
            ids=self.convert_tokens_to_ids(tokens),
            add_special_tokens=add_special_tokens,
            padding=padding_strategy.value,
            truncation=truncation_strategy.value,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            prepend_batch_axis=True,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_length=return_length,
            verbose=verbose,
        )

    def prepare_table_query(
        self,
        table,
        query,
        answer=None,
        truncation_strategy: Optional[Union[str, TruncationStrategy, TapexTruncationStrategy]] = None,
        max_length=None,
    ):
        """
        This method can be used to linearize a table and add a corresponding query.

        Optionally, it also handles truncation of the table (cells).

        An answer can be provided for more precise truncation.
        """
        if not table.empty:
            # step 1: create table dictionary
            table_content = {"header": list(table.columns), "rows": [list(row.values) for i, row in table.iterrows()]}

            # step 2: modify table internally
            # always truncate table cells based on self.max_cell_length
            # optionally truncate rows if truncation_strategy is set to it
            self.truncate_table_cells(table_content, query, answer)
            if truncation_strategy == TapexTruncationStrategy.DROP_ROWS_TO_FIT:
                self.truncate_table_rows(table_content, query, answer, max_length=max_length)
+
# step 3: linearize table
|
1327 |
+
linear_table = self.table_linearize.process_table(table_content)
|
1328 |
+
else:
|
1329 |
+
linear_table = ""
|
1330 |
+
|
1331 |
+
if linear_table == "":
|
1332 |
+
logger.warning(
|
1333 |
+
"You provide an empty table, or all cells contain much tokens (e.g., >= 1024 tokens). "
|
1334 |
+
+ f"Please carefully check the corresponding table with the query : {query}."
|
1335 |
+
)
|
1336 |
+
if query == "":
|
1337 |
+
logger.warning("You provide nothing to query with respect to the table.")
|
1338 |
+
# step 4: concatenate query with linear_table
|
1339 |
+
separator = " " if query and linear_table else ""
|
1340 |
+
joint_input = (query + separator + linear_table) if query else linear_table
|
1341 |
+
|
1342 |
+
return joint_input
|
1343 |
+
|
1344 |
+
def truncate_table_cells(self, table_content: Dict, question: str, answer: List):
|
1345 |
+
# TODO (Qian): is it possible to revert the original cell if it is in the final answer?
|
1346 |
+
cell_mapping = {}
|
1347 |
+
for row in table_content["rows"]:
|
1348 |
+
for i, cell in enumerate(row):
|
1349 |
+
truncate_cell = self.truncate_cell(cell)
|
1350 |
+
if truncate_cell is not None:
|
1351 |
+
cell_mapping[cell] = truncate_cell
|
1352 |
+
row[i] = truncate_cell
|
1353 |
+
|
1354 |
+
# modify the answer list
|
1355 |
+
if answer is not None:
|
1356 |
+
for i, case in enumerate(answer):
|
1357 |
+
if case in cell_mapping.keys():
|
1358 |
+
answer[i] = cell_mapping[case]
|
1359 |
+
|
1360 |
+
def truncate_cell(self, cell_value):
|
1361 |
+
# do not process on these cases
|
1362 |
+
if isinstance(cell_value, int) or isinstance(cell_value, float):
|
1363 |
+
return cell_value
|
1364 |
+
if cell_value.strip() != "":
|
1365 |
+
try_tokens = self.tokenize(cell_value)
|
1366 |
+
if len(try_tokens) >= self.max_cell_length:
|
1367 |
+
retain_tokens = try_tokens[: self.max_cell_length]
|
1368 |
+
retain_cell_value = self.convert_tokens_to_string(retain_tokens)
|
1369 |
+
return retain_cell_value
|
1370 |
+
else:
|
1371 |
+
return None
|
1372 |
+
else:
|
1373 |
+
return cell_value
|
1374 |
+
|
1375 |
+
def truncate_table_rows(
|
1376 |
+
self, table_content: Dict, question: str, answer: Optional[Union[str, List[str]]] = None, max_length=None
|
1377 |
+
):
|
1378 |
+
"""
|
1379 |
+
Args:
|
1380 |
+
table_content:
|
1381 |
+
{"header": xxx, "rows": xxx, "id" (Optionally): xxx}
|
1382 |
+
|
1383 |
+
question:
|
1384 |
+
natural language sentence
|
1385 |
+
|
1386 |
+
answer:
|
1387 |
+
if for training, is the supervision; otherwise will be empty
|
1388 |
+
"""
|
1389 |
+
delete_ratio, remain_token_len = self.estimate_delete_ratio(table_content, question, max_length)
|
1390 |
+
# randomly delete unrelated rows
|
1391 |
+
self.delete_unrelated_rows(table_content, question, answer, delete_ratio)
|
1392 |
+
# guarantee the result < max_length
|
1393 |
+
maximum_keep_rows = 0
|
1394 |
+
for ind, row_example in enumerate(table_content["rows"]):
|
1395 |
+
value_string = self.table_linearize.process_row(row_example, ind + 1)
|
1396 |
+
value_token_len = len(self.tokenize(value_string))
|
1397 |
+
# over the size limit, and take action
|
1398 |
+
if value_token_len > remain_token_len:
|
1399 |
+
break
|
1400 |
+
remain_token_len -= value_token_len
|
1401 |
+
maximum_keep_rows += 1
|
1402 |
+
del table_content["rows"][maximum_keep_rows:]
|
1403 |
+
|
1404 |
+
def estimate_delete_ratio(self, table_content: Dict, question: str, max_length=None):
|
1405 |
+
if "header" not in table_content or "rows" not in table_content:
|
1406 |
+
raise ValueError("The table content should contain both 'header' and 'rows' keys.")
|
1407 |
+
# calculate the tokens of header, special tokens will only be pre-prepended into question
|
1408 |
+
question_tokens = self.tokenize(question, add_special_tokens=True)
|
1409 |
+
# calculate the tokens of header
|
1410 |
+
header_string = self.table_linearize.process_header(table_content["header"])
|
1411 |
+
header_tokens = self.tokenize(header_string, add_special_tokens=False)
|
1412 |
+
# split all cell values into tokens and see how many can be accommodated
|
1413 |
+
used_token_len = len(question_tokens) + len(header_tokens)
|
1414 |
+
# remaining token space for rows
|
1415 |
+
remain_token_len = max_length - used_token_len
|
1416 |
+
|
1417 |
+
value_string = ""
|
1418 |
+
for _, row_example in enumerate(table_content["rows"]):
|
1419 |
+
# use a general index to roughly estimate the overall token len
|
1420 |
+
value_string += self.table_linearize.process_row(row_example, 100) + " "
|
1421 |
+
value_token_len = len(self.tokenize(value_string))
|
1422 |
+
|
1423 |
+
if value_token_len < remain_token_len:
|
1424 |
+
# no row will be deleted
|
1425 |
+
return 0.0, remain_token_len
|
1426 |
+
else:
|
1427 |
+
# calc a roughly delete rate
|
1428 |
+
return 1.0 - remain_token_len / value_token_len, remain_token_len
|
1429 |
+
|
1430 |
+
def delete_unrelated_rows(self, table_content: Dict, question: str, answer: List, delete_ratio: float):
|
1431 |
+
"""
|
1432 |
+
The argument answer is used only during training.
|
1433 |
+
"""
|
1434 |
+
truncated_unrelated_indices = []
|
1435 |
+
related_indices = []
|
1436 |
+
if answer is None or len(answer) == 0:
|
1437 |
+
answer_set = set()
|
1438 |
+
else:
|
1439 |
+
answer_set = {ans_ex.lower() for ans_ex in answer}
|
1440 |
+
# add question key words into answer set
|
1441 |
+
if question is not None:
|
1442 |
+
answer_set.update(question.split())
|
1443 |
+
question_set = set(question.strip("?!.,").split(" "))
|
1444 |
+
row_max_len = len(table_content["rows"])
|
1445 |
+
for _row_idx, row in enumerate(table_content["rows"]):
|
1446 |
+
lower_row = {str(cell).lower() for cell in row}
|
1447 |
+
if len(lower_row & answer_set) == 0 and len(lower_row & question_set) == 0:
|
1448 |
+
truncated_unrelated_indices.append(_row_idx)
|
1449 |
+
else:
|
1450 |
+
# add neighbours to preserve information aggressively
|
1451 |
+
related_indices.extend([_row_idx - 2, _row_idx - 1, _row_idx, _row_idx + 1, _row_idx + 2])
|
1452 |
+
|
1453 |
+
# remove the neighbours
|
1454 |
+
truncated_unrelated_indices = [
|
1455 |
+
_row_idx for _row_idx in truncated_unrelated_indices if _row_idx not in related_indices
|
1456 |
+
]
|
1457 |
+
# select some cases to drop
|
1458 |
+
drop_items = min(len(truncated_unrelated_indices), int(len(table_content["rows"]) * delete_ratio))
|
1459 |
+
drop_row_indices = random.choices(truncated_unrelated_indices, k=drop_items)
|
1460 |
+
|
1461 |
+
for _row_idx in reversed(range(row_max_len)):
|
1462 |
+
if _row_idx in drop_row_indices:
|
1463 |
+
del table_content["rows"][_row_idx]
|
1464 |
+
|
1465 |
+
# only when the drop ratio is too large, logging for warning.
|
1466 |
+
if "id" in table_content and len(drop_row_indices) > 0:
|
1467 |
+
logger.warning("Delete {:.2f} rows in table {}".format(len(drop_row_indices), table_content["id"]))
|
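
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file). The helpers above
# linearize a table, truncate cells/rows to fit the token budget, and encode the
# answer supervision. With a real checkpoint this looks roughly like:
#
#     import pandas as pd
#     from transformers import TapexTokenizer
#
#     tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")
#     table = pd.DataFrame.from_dict({"year": [1896, 2008], "city": ["athens", "beijing"]})
#     query = "in which year did beijing host the olympic games?"
#
#     # __call__ -> prepare_table_query joins the query and the linearized table:
#     # "<query> col : year | city row 1 : 1896 | athens row 2 : 2008 | beijing"
#     encoding = tokenizer(table=table, query=query, return_tensors="pt")
#
#     # target_encode returns the answer supervision as a plain list of token ids
#     labels = tokenizer.target_encode("2008")
# ---------------------------------------------------------------------------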
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__init__.py
ADDED
@@ -0,0 +1,63 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]


if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )


else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
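
# Lazy-import sketch (illustrative, not part of the original file): nothing from
# the configuration or modeling submodules is executed at import time. The first
# attribute access goes through _LazyModule.__getattr__, which imports the
# submodule on demand; the modeling symbols are registered only when torch is
# installed, since is_torch_available() gates the _import_structure entry above:
#
#     from transformers.models.deprecated import trajectory_transformer
#     config_cls = trajectory_transformer.TrajectoryTransformerConfig  # triggers the real import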
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.08 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/configuration_trajectory_transformer.cpython-310.pyc
ADDED
Binary file (6.36 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
ADDED
Binary file (1.8 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/modeling_trajectory_transformer.cpython-310.pyc
ADDED
Binary file (19 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py
ADDED
@@ -0,0 +1,155 @@
# coding=utf-8
# Copyright 2022 The Trajectory Transformers paper authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TrajectoryTransformer model configuration"""

from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)


from .._archive_maps import TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class TrajectoryTransformerConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`TrajectoryTransformerModel`]. It is used to
    instantiate a TrajectoryTransformer model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the
    TrajectoryTransformer
    [CarlCochet/trajectory-transformer-halfcheetah-medium-v2](https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 100):
            Vocabulary size of the TrajectoryTransformer model. Defines the number of different tokens that can be
            represented by the `trajectories` passed when calling [`TrajectoryTransformerModel`].
        action_weight (`int`, *optional*, defaults to 5):
            Weight of the action in the loss function.
        reward_weight (`int`, *optional*, defaults to 1):
            Weight of the reward in the loss function.
        value_weight (`int`, *optional*, defaults to 1):
            Weight of the value in the loss function.
        block_size (`int`, *optional*, defaults to 249):
            Size of the blocks in the trajectory transformer.
        action_dim (`int`, *optional*, defaults to 6):
            Dimension of the action space.
        observation_dim (`int`, *optional*, defaults to 17):
            Dimension of the observation space.
        transition_dim (`int`, *optional*, defaults to 25):
            Dimension of the transition space.
        n_layer (`int`, *optional*, defaults to 4):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 4):
            Number of attention heads for each attention layer in the Transformer encoder.
        n_embd (`int`, *optional*, defaults to 128):
            Dimensionality of the embeddings and hidden states.
        resid_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        learning_rate (`float`, *optional*, defaults to 0.0006):
            Learning rate stored alongside the model configuration.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        kaiming_initializer_range (`float`, *optional*, defaults to 1):
            A coefficient scaling the negative slope of the kaiming initializer rectifier for EinLinear layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.

    Example:

    ```python
    >>> from transformers import TrajectoryTransformerConfig, TrajectoryTransformerModel

    >>> # Initializing a TrajectoryTransformer CarlCochet/trajectory-transformer-halfcheetah-medium-v2 style configuration
    >>> configuration = TrajectoryTransformerConfig()

    >>> # Initializing a model (with random weights) from the CarlCochet/trajectory-transformer-halfcheetah-medium-v2 style configuration
    >>> model = TrajectoryTransformerModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
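
# attribute_map sketch (illustrative, not part of the original file): the generic
# PretrainedConfig names resolve to the GPT-style fields declared above, so both
# spellings read and write the same value:
#
#     config = TrajectoryTransformerConfig(n_embd=256, n_head=8, n_layer=6)
#     assert config.hidden_size == 256          # alias of n_embd
#     assert config.num_attention_heads == 8    # alias of n_head
#     assert config.num_hidden_layers == 6      # alias of n_layer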
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py
ADDED
@@ -0,0 +1,70 @@
# coding=utf-8
# Copyright 2022 The Trajectory Transformers paper authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TrajectoryTransformer pytorch checkpoint conversion"""

import torch
import trajectory.utils as utils

from transformers import TrajectoryTransformerModel


class Parser(utils.Parser):
    dataset: str = "halfcheetah-medium-expert-v2"
    config: str = "config.offline"


def convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch(logbase, dataset, loadpath, epoch, device):
    """Converts the original Sequential MLP blocks to the ModuleList-based HF model."""

    gpt, gpt_epoch = utils.load_model(logbase, dataset, loadpath, epoch=epoch, device=device)
    trajectory_transformer = TrajectoryTransformerModel(gpt.config)

    trajectory_transformer.tok_emb.load_state_dict(gpt.tok_emb.state_dict())
    trajectory_transformer.pos_emb = gpt.pos_emb
    trajectory_transformer.drop.load_state_dict(gpt.drop.state_dict())
    trajectory_transformer.ln_f.load_state_dict(gpt.ln_f.state_dict())
    trajectory_transformer.head.load_state_dict(gpt.head.state_dict())

    for i, block in enumerate(gpt.blocks):
        trajectory_transformer.blocks[i].ln1.load_state_dict(block.ln1.state_dict())
        trajectory_transformer.blocks[i].ln2.load_state_dict(block.ln2.state_dict())
        trajectory_transformer.blocks[i].attn.load_state_dict(block.attn.state_dict())

        trajectory_transformer.blocks[i].l1.load_state_dict(block.mlp[0].state_dict())
        trajectory_transformer.blocks[i].act.load_state_dict(block.mlp[1].state_dict())
        trajectory_transformer.blocks[i].l2.load_state_dict(block.mlp[2].state_dict())
        trajectory_transformer.blocks[i].drop.load_state_dict(block.mlp[3].state_dict())

    torch.save(trajectory_transformer.state_dict(), "pytorch_model.bin")


if __name__ == "__main__":
    """
    To run this script you will need to install the original repository, which is needed to load the original model.
    You can find it here: https://github.com/jannerm/trajectory-transformer. From that repository you can also
    download the original PyTorch checkpoints.

    Run with the command:

    ```sh
    python convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py --dataset <dataset_name> \
        --gpt_loadpath <path_to_original_pytorch_checkpoint>
    ```
    """

    args = Parser().parse_args("plan")
    convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch(
        args.logbase, args.dataset, args.gpt_loadpath, args.gpt_epoch, args.device
    )
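
# Loading sketch (illustrative, not part of the original file): the script only
# writes a raw state dict, so the converted weights would be restored roughly like
# this (the config must match the architecture of the converted GPT checkpoint;
# the path "pytorch_model.bin" is the one saved above):
#
#     from transformers import TrajectoryTransformerConfig, TrajectoryTransformerModel
#
#     config = TrajectoryTransformerConfig()
#     model = TrajectoryTransformerModel(config)
#     model.load_state_dict(torch.load("pytorch_model.bin", map_location="cpu"))
#     model.eval()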
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py
ADDED
@@ -0,0 +1,606 @@
# coding=utf-8
# Copyright 2022 The Trajectory Transformers paper authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch TrajectoryTransformer model."""

import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import functional as F

from ....modeling_utils import PreTrainedModel
from ....utils import (
    ModelOutput,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_trajectory_transformer import TrajectoryTransformerConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "CarlCochet/trajectory-transformer-halfcheetah-medium-v2"
_CONFIG_FOR_DOC = "TrajectoryTransformerConfig"


from .._archive_maps import TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


def load_tf_weights_in_trajectory_transformer(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model."""
    try:
        import re

        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        pointer = model
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            array = np.transpose(array)
        try:
            if pointer.shape != array.shape:
                raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        except ValueError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model


@dataclass
class TrajectoryTransformerOutput(ModelOutput):
    """
    Base class for model's outputs that also contains a pooling of the last hidden states.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss.
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`Tuple[Tuple[torch.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of length `config.n_layers`, containing tuples of tensors of shape `(batch_size, num_heads,
            sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the
            attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class TrajectoryTransformerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = TrajectoryTransformerConfig
    load_tf_weights = load_tf_weights_in_trajectory_transformer
    base_model_prefix = "trajectory_transformer"
    main_input_name = "trajectories"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, EinLinear):
            for i in range(module.n_models):
                nn.init.kaiming_uniform_(module.weight[i], a=math.sqrt(5) / self.config.kaiming_initializer_range)
                if module.bias is not None:
                    fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight[i])
                    bound = (1 / math.sqrt(fan_in)) * self.config.initializer_range
                    nn.init.uniform_(module.bias[i], -bound, bound)


TRAJECTORY_TRANSFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
    behavior.

    Parameters:
        config ([`TrajectoryTransformerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

TRAJECTORY_TRANSFORMER_INPUTS_DOCSTRING = r"""
    Args:
        trajectories (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Batch of trajectories, where a trajectory is a sequence of states, actions and rewards.
        past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`, *optional*):
            Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
            `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
            their past given to this model should not be passed as `input_ids` as they have already been computed.
        targets (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Desired targets used to compute the loss.
        attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


class EinLinear(nn.Module):
    def __init__(self, n_models, in_features, out_features, bias):
        super().__init__()
        self.n_models = n_models
        self.out_features = out_features
        self.in_features = in_features
        self.weight = nn.Parameter(torch.Tensor(n_models, out_features, in_features))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(n_models, out_features))
        else:
            self.register_parameter("bias", None)

    def reset_parameters(self):
        for i in range(self.n_models):
            nn.init.kaiming_uniform_(self.weight[i], a=math.sqrt(5))
            if self.bias is not None:
                fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[i])
                bound = 1 / math.sqrt(fan_in)
                nn.init.uniform_(self.bias[i], -bound, bound)

    def forward(self, input):
        """
        Args:
            input (`torch.FloatTensor` of shape `(B, n_models, input_dim)`):
                The input to the layer.
        """
        # [ batch_size x n_models x output_dim ]
        output = torch.einsum("eoi,bei->beo", self.weight, input)
        if self.bias is not None:
            raise RuntimeError("EinLinear does not support a bias term in forward")
        return output

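# Shape-only sketch of EinLinear (illustrative, not part of the original file):
# one independent in_features -> out_features linear map per transition slot,
# evaluated with a single einsum. The dimensions below mirror the model head
# (transition_dim=25, n_embd=128, vocab_size + 1 = 101):
#
#     layer = EinLinear(n_models=25, in_features=128, out_features=101, bias=False)
#     layer.reset_parameters()      # weights are allocated uninitialized in __init__
#     x = torch.randn(4, 25, 128)   # one input vector per model slot
#     layer(x).shape                # torch.Size([4, 25, 101])
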
class CausalSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()

        if config.n_embd % config.n_head != 0:
            raise ValueError(f"n_head ({config.n_head}) should be a divisor of n_embd ({config.n_embd})")

        # key, query, value projections for all heads
        self.key = nn.Linear(config.n_embd, config.n_embd)
        self.query = nn.Linear(config.n_embd, config.n_embd)
        self.value = nn.Linear(config.n_embd, config.n_embd)

        # regularization
        self.attn_drop = nn.Dropout(config.attn_pdrop)
        self.resid_drop = nn.Dropout(config.resid_pdrop)

        # output projection
        self.proj = nn.Linear(config.n_embd, config.n_embd)

        # causal mask to ensure that attention is only applied to the left in the input sequence
        self.register_buffer(
            "mask",
            torch.tril(torch.ones(config.block_size, config.block_size)).view(
                1, 1, config.block_size, config.block_size
            ),
            persistent=False,
        )

        # mask previous value estimates
        joined_dim = config.observation_dim + config.action_dim + 2
        self.mask.squeeze()[:, joined_dim - 1 :: joined_dim] = 0

        self.n_head = config.n_head

    def forward(
        self,
        hidden_states: Optional[Tuple[torch.FloatTensor]],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ):
        batch_size, sequence_length, embedding_dim = hidden_states.size()

        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
        # [ batch_size x n_heads x sequence_length x head_dim ]
        key = (
            self.key(hidden_states)
            .view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head)
            .transpose(1, 2)
        )
        query = (
            self.query(hidden_states)
            .view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head)
            .transpose(1, 2)
        )
        value = (
            self.value(hidden_states)
            .view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head)
            .transpose(1, 2)
        )

        if layer_past is not None:
            past_key, past_value = layer_past
            key = torch.cat((past_key, key), dim=-2)
            value = torch.cat((past_value, value), dim=-2)

        if use_cache is True:
            present = (key, value)
        else:
            present = None

        # causal self-attention
        # [ batch_size x n_heads x sequence_length x sequence_length ]
        attn_weights = (torch.matmul(query, key.transpose(-2, -1))) * (1.0 / math.sqrt(key.size(-1)))
        attn_weights = attn_weights.masked_fill(
            self.mask[:, :, :sequence_length, :sequence_length] == 0, torch.finfo(attn_weights.dtype).min
        )
        attn_weights = F.softmax(attn_weights, dim=-1)
        self._attn_map = attn_weights.clone()
        attn_weights = self.attn_drop(attn_weights)

        output = torch.matmul(attn_weights, value)
        # [ batch_size x sequence_length x embedding_dim ]
        # re-assemble all head outputs side by side
        output = output.transpose(1, 2).contiguous().view(batch_size, sequence_length, embedding_dim)

        # output projection
        output = self.resid_drop(self.proj(output))

        outputs = (output, present)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs


class Block(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.ln1 = nn.LayerNorm(config.n_embd)
        self.ln2 = nn.LayerNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)

        # MLP
        self.l1 = nn.Linear(config.n_embd, 4 * config.n_embd)
        self.act = nn.GELU()
        self.l2 = nn.Linear(4 * config.n_embd, config.n_embd)
        self.drop = nn.Dropout(config.resid_pdrop)

    def forward(
        self,
        hidden_states: Optional[Tuple[torch.FloatTensor]],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ):
        residual = hidden_states
        hidden_states = self.ln1(hidden_states)

        attn_outputs = self.attn(
            hidden_states, layer_past=layer_past, use_cache=use_cache, output_attentions=output_attentions
        )
        attn_output = attn_outputs[0]
        outputs = attn_outputs[1:]
        hidden_states = attn_output + residual

        residual = hidden_states
        hidden_states = self.ln2(hidden_states)
        hidden_states = self.l1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.l2(hidden_states)
        hidden_states = residual + self.drop(hidden_states)

        if use_cache:
            outputs = (hidden_states,) + outputs
        else:
            outputs = (hidden_states,) + outputs[1:]

        return outputs

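# Shape check for a single Block (illustrative, not part of the original file):
# pre-LayerNorm attention and MLP sub-layers, each wrapped in a residual add,
# preserve the hidden dimension. With the default config (n_embd=128, n_head=4):
#
#     config = TrajectoryTransformerConfig()
#     block = Block(config)
#     out = block(torch.randn(2, 10, 128))
#     out[0].shape                  # torch.Size([2, 10, 128])
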
@add_start_docstrings(
    "The bare TrajectoryTransformer Model transformer outputting raw hidden-states without any specific head on top.",
    TRAJECTORY_TRANSFORMER_START_DOCSTRING,
)
class TrajectoryTransformerModel(TrajectoryTransformerPreTrainedModel):
    """the full GPT language model, with a context size of block_size"""

    def __init__(self, config):
        super().__init__(config)

        # input embedding stem (+1 for stop token)
        self.tok_emb = nn.Embedding(config.vocab_size * config.transition_dim + 1, config.n_embd)

        self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))
        self.drop = nn.Dropout(config.embd_pdrop)
        # transformer
        self.blocks = nn.ModuleList([Block(config) for _ in range(config.n_layer)])
        # decoder head
        self.ln_f = nn.LayerNorm(config.n_embd)
        self.head = EinLinear(config.transition_dim, config.n_embd, config.vocab_size + 1, bias=False)

        self.vocab_size = config.vocab_size
        self.stop_token = config.vocab_size * config.transition_dim
        self.block_size = config.block_size

        self.observation_dim = config.observation_dim
        self.action_dim = config.action_dim
        self.transition_dim = config.transition_dim
        self.embedding_dim = config.n_embd

        self.action_weight = config.action_weight
        self.reward_weight = config.reward_weight
        self.value_weight = config.value_weight

        self.gradient_checkpointing = False

        self.post_init()

    def get_block_size(self):
        return self.block_size

    def offset_tokens(self, trajectories):
        _, sequence_length = trajectories.shape

        n_states = int(np.ceil(sequence_length / self.transition_dim))

        offsets = torch.arange(self.transition_dim) * self.vocab_size
        offsets = offsets.repeat(n_states).to(trajectories.device)

        offset_trajectories = trajectories + offsets[:sequence_length]
        offset_trajectories[trajectories == self.vocab_size] = self.stop_token
        return offset_trajectories

    def pad_to_full_observation(self, hidden_states):
        batch_size, sequence_length, _ = hidden_states.shape

        n_pad = (self.transition_dim - sequence_length % self.transition_dim) % self.transition_dim
        padding = torch.zeros(batch_size, n_pad, self.embedding_dim, device=hidden_states.device)

        # [ batch_size x padded_sequence_length' x embedding_dim ]
        hidden_states_pad = torch.cat([hidden_states, padding], dim=1)
        hidden_states_pad = hidden_states_pad.view(-1, self.transition_dim, self.embedding_dim)

        return hidden_states_pad, n_pad

    @add_start_docstrings_to_model_forward(
        TRAJECTORY_TRANSFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")
    )
    @replace_return_docstrings(output_type=TrajectoryTransformerOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        trajectories: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        targets: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], TrajectoryTransformerOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import TrajectoryTransformerModel
        >>> import numpy as np
        >>> import torch

        >>> model = TrajectoryTransformerModel.from_pretrained(
        ...     "CarlCochet/trajectory-transformer-halfcheetah-medium-v2"
        ... )
        >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        >>> model.to(device)
        >>> model.eval()

        >>> observations_dim, action_dim, batch_size = 17, 6, 256
        >>> seq_length = observations_dim + action_dim + 1

        >>> trajectories = torch.LongTensor([np.random.permutation(seq_length) for _ in range(batch_size)]).to(
        ...     device
        ... )
        >>> targets = torch.LongTensor([np.random.permutation(seq_length) for _ in range(batch_size)]).to(device)

        >>> outputs = model(
        ...     trajectories,
        ...     targets=targets,
        ...     use_cache=True,
        ...     output_attentions=True,
        ...     output_hidden_states=True,
        ...     return_dict=True,
        ... )
        ```
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        if past_key_values is None:
            past_key_values = tuple([None] * len(self.blocks))

        batch_size, sequence_length = trajectories.size()

        if sequence_length > self.block_size:
            raise ValueError("Cannot forward, model block size is exhausted.")

        offset_trajectories = self.offset_tokens(trajectories)
        # [ batch_size x sequence_length x embedding_dim ]
        # forward the GPT model
        token_embeddings = self.tok_emb(offset_trajectories)  # each index maps to a (learnable) vector
        position_embeddings = self.pos_emb[:, :sequence_length, :]  # each position maps to a (learnable) vector

        hidden_states = self.drop(token_embeddings + position_embeddings)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None

        for block, layer_past in zip(self.blocks, past_key_values):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:
                outputs = self._gradient_checkpointing_func(
                    block.__call__,
                    hidden_states,
                    layer_past,
                    use_cache,
                    output_attentions,
                )
            else:
                outputs = block(hidden_states, layer_past, use_cache, output_attentions)

            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)

            if output_attentions:
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)

        # [ batch_size x sequence_length x embedding_dim ]
        hidden_state = self.ln_f(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        hidden_states_pad, n_pad = self.pad_to_full_observation(hidden_state)

        logits = self.head(hidden_states_pad)
        logits = logits.reshape(batch_size, sequence_length + n_pad, self.vocab_size + 1)
        logits = logits[:, :sequence_length]

        # if we are given some desired targets also calculate the loss
        if targets is not None:
            loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), targets.view(-1), reduction="none")
            if self.action_weight != 1 or self.reward_weight != 1 or self.value_weight != 1:
                # make weights
                n_states = int(np.ceil(sequence_length / self.transition_dim))
                weights = torch.cat(
                    [
                        torch.ones(self.observation_dim, device=trajectories.device),
                        torch.ones(self.action_dim, device=trajectories.device) * self.action_weight,
                        torch.ones(1, device=trajectories.device) * self.reward_weight,
                        torch.ones(1, device=trajectories.device) * self.value_weight,
                    ]
                )
                weights = weights.repeat(n_states)
                weights = weights[1:].repeat(batch_size, 1)
                loss = loss * weights.view(-1)
            if attention_mask is not None:
                loss = (loss * attention_mask.view(-1)).mean()
            else:
                loss = loss.mean()
        else:
            loss = None

        if not return_dict:
            return tuple(v for v in [loss, logits, presents, all_hidden_states, all_self_attentions] if v is not None)

        return TrajectoryTransformerOutput(
            loss=loss,
            logits=logits,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
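
# Token-offset sketch (illustrative, not part of the original file): offset_tokens
# gives each of the transition_dim slots its own disjoint vocabulary block, so a
# raw token t in slot p is embedded as t + p * vocab_size, and the raw value
# vocab_size marks the stop token (mapped to vocab_size * transition_dim):
#
#     vocab_size, transition_dim = 100, 25
#     trajectories = torch.LongTensor([[3, 7, 100]])                # third token is a stop
#     offsets = torch.arange(transition_dim) * vocab_size           # [0, 100, 200, ...]
#     offset = trajectories + offsets[:3]                           # [[3, 107, 300]]
#     offset[trajectories == vocab_size] = vocab_size * transition_dim  # stop -> 2500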
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py
ADDED
@@ -0,0 +1,189 @@
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer XL configuration"""

from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)


from .._archive_maps import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class TransfoXLConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`TransfoXLModel`] or a [`TFTransfoXLModel`]. It
    is used to instantiate a Transformer-XL model according to the specified arguments, defining the model
    architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the
    TransfoXL [transfo-xl/transfo-xl-wt103](https://huggingface.co/transfo-xl/transfo-xl-wt103) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 267735):
            Vocabulary size of the Transformer-XL model. Defines the number of different tokens that can be
            represented by the `inputs_ids` passed when calling [`TransfoXLModel`] or [`TFTransfoXLModel`].
        cutoffs (`List[int]`, *optional*, defaults to `[20000, 40000, 200000]`):
            Cutoffs for the adaptive softmax.
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the model's hidden states.
        d_embed (`int`, *optional*, defaults to 1024):
            Dimensionality of the embeddings.
        n_head (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        d_head (`int`, *optional*, defaults to 64):
            Dimensionality of the model's heads.
        d_inner (`int`, *optional*, defaults to 4096):
            Inner dimension of the feed-forward layers.
        div_val (`int`, *optional*, defaults to 4):
            Divisor value for the adaptive input and softmax.
        pre_lnorm (`boolean`, *optional*, defaults to `False`):
            Whether or not to apply LayerNorm to the input instead of the output in the blocks.
        n_layer (`int`, *optional*, defaults to 18):
            Number of hidden layers in the Transformer encoder.
        mem_len (`int`, *optional*, defaults to 1600):
            Length of the retained previous heads.
        clamp_len (`int`, *optional*, defaults to 1000):
            Use the same pos embeddings after clamp_len.
        same_length (`boolean`, *optional*, defaults to `True`):
            Whether or not to use the same attention length for all tokens.
        proj_share_all_but_first (`boolean`, *optional*, defaults to `True`):
            True to share all but first projs, False not to share.
        attn_type (`int`, *optional*, defaults to 0):
            Attention type. 0 for Transformer-XL, 1 for Shaw et al, 2 for Vaswani et al, 3 for Al Rfou et al.
        sample_softmax (`int`, *optional*, defaults to -1):
            Number of samples in the sampled softmax.
        adaptive (`boolean`, *optional*, defaults to `True`):
            Whether or not to use adaptive softmax.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        dropatt (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        untie_r (`boolean`, *optional*, defaults to `True`):
            Whether or not to untie relative position biases.
        init (`str`, *optional*, defaults to `"normal"`):
            Parameter initializer to use.
        init_range (`float`, *optional*, defaults to 0.01):
            Parameters initialized by U(-init_range, init_range).
        proj_init_std (`float`, *optional*, defaults to 0.01):
            Parameters initialized by N(0, proj_init_std).
        init_std (`float`, *optional*, defaults to 0.02):
            Parameters initialized by N(0, init_std).
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
        eos_token_id (`int`, *optional*, defaults to 0):
            End of stream token id.

    Examples:

    ```python
    >>> from transformers import TransfoXLConfig, TransfoXLModel

    >>> # Initializing a Transformer XL configuration
    >>> configuration = TransfoXLConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = TransfoXLModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
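A quick hedged check of the `attribute_map` defined above, assuming `TransfoXLConfig` is still exported at the top level of this transformers build:

from transformers import TransfoXLConfig

config = TransfoXLConfig(d_model=512, n_head=8)
# attribute_map aliases the library's generic names onto Transformer-XL's own.
assert config.hidden_size == 512
assert config.num_attention_heads == 8
# proj_share_all_but_first=True ties every projection except the first cluster.
assert config.tie_projs == [False, True, True, True]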
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py
ADDED
@@ -0,0 +1,121 @@
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Transformer XL checkpoint and datasets."""


import argparse
import os
import pickle
import sys

import torch

from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.deprecated.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.deprecated.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils


def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained Transformer-XL model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted in a vocabulary.",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
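The converter can also be driven directly from Python instead of the argparse entry point. A hedged sketch; every path below is a hypothetical placeholder, and the long module import simply mirrors where this file lives on disk:

from transformers.models.deprecated.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
    convert_transfo_xl_checkpoint_to_pytorch,
)

convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path="./tf_ckpt/model.ckpt",       # hypothetical path
    transfo_xl_config_file="./tf_ckpt/config.json",  # hypothetical path
    pytorch_dump_folder_path="./transfo-xl-pt",
    transfo_xl_dataset_file="",                      # skip the corpus-conversion branch
)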
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py
ADDED
@@ -0,0 +1,1122 @@
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TF 2.0 Transformer XL model.
"""

from __future__ import annotations

from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import tensorflow as tf

from ....modeling_tf_utils import (
    TFModelInputType,
    TFPreTrainedModel,
    TFSequenceClassificationLoss,
    get_initializer,
    keras,
    keras_serializable,
    unpack_inputs,
)
from ....tf_utils import shape_list, stable_softmax
from ....utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
)
from .configuration_transfo_xl import TransfoXLConfig
from .modeling_tf_transfo_xl_utilities import TFAdaptiveSoftmaxMask


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "transfo-xl/transfo-xl-wt103"
_CONFIG_FOR_DOC = "TransfoXLConfig"


from .._archive_maps import TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


class TFPositionalEmbedding(keras.layers.Layer):
    def __init__(self, demb, **kwargs):
        super().__init__(**kwargs)

        self.inv_freq = 1 / (10000 ** (tf.range(0, demb, 2.0) / demb))

    def call(self, pos_seq, bsz=None):
        self.inv_freq = tf.cast(self.inv_freq, dtype=pos_seq.dtype)
        sinusoid_inp = tf.einsum("i,j->ij", pos_seq, self.inv_freq)
        pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1)

        if bsz is not None:
            return tf.tile(pos_emb[:, None, :], [1, bsz, 1])
        else:
            return pos_emb[:, None, :]
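# Illustrative sketch, not part of the diff: the shape TFPositionalEmbedding
# produces, computed standalone with hypothetical sizes (demb=8, klen=5).
import tensorflow as tf

demb = 8
inv_freq = 1 / (10000 ** (tf.range(0, demb, 2.0) / demb))
pos_seq = tf.range(4.0, -1.0, -1.0)  # klen=5, counted down exactly as the model does
sinusoid_inp = tf.einsum("i,j->ij", pos_seq, inv_freq)
pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1)
print(pos_emb.shape)  # (5, 8): one d_model-sized vector per relative position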
class TFPositionwiseFF(keras.layers.Layer):
    def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5, init_std=0.02, **kwargs):
        super().__init__(**kwargs)

        self.d_model = d_model
        self.d_inner = d_inner
        self.dropout = dropout

        self.layer_1 = keras.layers.Dense(
            d_inner, kernel_initializer=get_initializer(init_std), activation=tf.nn.relu, name="CoreNet_._0"
        )
        self.drop_1 = keras.layers.Dropout(dropout)
        self.layer_2 = keras.layers.Dense(d_model, kernel_initializer=get_initializer(init_std), name="CoreNet_._3")
        self.drop_2 = keras.layers.Dropout(dropout)

        self.layer_norm = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layer_norm")

        self.pre_lnorm = pre_lnorm

    def call(self, inp, training=False):
        if self.pre_lnorm:
            # layer normalization + positionwise feed-forward
            core_out = self.layer_norm(inp)
            core_out = self.layer_1(core_out)
            core_out = self.drop_1(core_out, training=training)
            core_out = self.layer_2(core_out)
            core_out = self.drop_2(core_out, training=training)

            # residual connection
            output = core_out + inp
        else:
            # positionwise feed-forward
            core_out = self.layer_1(inp)
            core_out = self.drop_1(core_out, training=training)
            core_out = self.layer_2(core_out)
            core_out = self.drop_2(core_out, training=training)

            # residual connection + layer normalization
            output = self.layer_norm(inp + core_out)

        return output


class TFRelPartialLearnableMultiHeadAttn(keras.layers.Layer):
    def __init__(
        self,
        n_head,
        d_model,
        d_head,
        dropout,
        dropatt=0.0,
        pre_lnorm=False,
        r_r_bias=None,
        r_w_bias=None,
        layer_norm_epsilon=1e-5,
        init_std=0.02,
        output_attentions=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.n_head = n_head
        self.d_model = d_model
        self.d_head = d_head
        self.dropout = dropout
        self.output_attentions = output_attentions

        self.qkv_net = keras.layers.Dense(
            3 * n_head * d_head, kernel_initializer=get_initializer(init_std), use_bias=False, name="qkv_net"
        )

        self.drop = keras.layers.Dropout(dropout)
        self.dropatt = keras.layers.Dropout(dropatt)
        self.o_net = keras.layers.Dense(
            d_model, kernel_initializer=get_initializer(init_std), use_bias=False, name="o_net"
        )

        self.layer_norm = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layer_norm")

        self.scale = 1 / (d_head**0.5)

        self.pre_lnorm = pre_lnorm

        if r_r_bias is not None and r_w_bias is not None:  # Biases are shared
            self.r_r_bias = r_r_bias
            self.r_w_bias = r_w_bias
        else:
            self.r_r_bias = None
            self.r_w_bias = None

        self.r_net = keras.layers.Dense(
            self.n_head * self.d_head, kernel_initializer=get_initializer(init_std), use_bias=False, name="r_net"
        )

    def build(self, input_shape):
        if self.r_r_bias is None or self.r_w_bias is None:  # Biases are not shared
            self.r_r_bias = self.add_weight(
                shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_r_bias"
            )
            self.r_w_bias = self.add_weight(
                shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_w_bias"
            )
        super().build(input_shape)

    def _rel_shift(self, x):
        x_size = shape_list(x)

        x = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])
        x = tf.reshape(x, [x_size[1] + 1, x_size[0], x_size[2], x_size[3]])
        x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1])
        x = tf.reshape(x, x_size)

        return x

    def call(self, w, r, attn_mask, mems, head_mask, output_attentions, training=False):
        qlen, rlen, bsz = shape_list(w)[0], shape_list(r)[0], shape_list(w)[1]

        if mems is not None:
            mems = tf.cast(mems, dtype=w.dtype)
            cat = tf.concat([mems, w], 0)
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(cat))
            else:
                w_heads = self.qkv_net(cat)
            r_head_k = self.r_net(r)

            w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, axis=-1)
            w_head_q = w_head_q[-qlen:]
        else:
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(w))
            else:
                w_heads = self.qkv_net(w)
            r_head_k = self.r_net(r)

            w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, axis=-1)

        klen = shape_list(w_head_k)[0]

        w_head_q = tf.reshape(w_head_q, (qlen, bsz, self.n_head, self.d_head))  # qlen x bsz x n_head x d_head
        w_head_k = tf.reshape(w_head_k, (klen, bsz, self.n_head, self.d_head))  # klen x bsz x n_head x d_head
        w_head_v = tf.reshape(w_head_v, (klen, bsz, self.n_head, self.d_head))  # klen x bsz x n_head x d_head

        r_head_k = tf.reshape(r_head_k, (rlen, self.n_head, self.d_head))  # rlen x n_head x d_head

        # compute attention score
        rw_head_q = w_head_q + self.r_w_bias  # qlen x bsz x n_head x d_head
        AC = tf.einsum("ibnd,jbnd->ijbn", rw_head_q, w_head_k)  # qlen x klen x bsz x n_head

        rr_head_q = w_head_q + self.r_r_bias
        BD = tf.einsum("ibnd,jnd->ijbn", rr_head_q, r_head_k)  # qlen x klen x bsz x n_head
        BD = self._rel_shift(BD)

        # [qlen x klen x bsz x n_head]
        attn_score = AC + BD
        attn_score = attn_score * self.scale

        # compute attention probability
        if attn_mask is not None:
            attn_mask_t = attn_mask[:, :, None, None]
            attn_mask_t = tf.cast(attn_mask_t, dtype=attn_score.dtype)
            attn_score = attn_score * (1.0 - attn_mask_t) - 1e30 * attn_mask_t

        # [qlen x klen x bsz x n_head]
        attn_prob = stable_softmax(attn_score, axis=1)
        attn_prob = self.dropatt(attn_prob, training=training)

        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * head_mask

        # compute attention vector
        attn_vec = tf.einsum("ijbn,jbnd->ibnd", attn_prob, w_head_v)

        # [qlen x bsz x n_head x d_head]
        attn_vec_sizes = shape_list(attn_vec)
        attn_vec = tf.reshape(attn_vec, (attn_vec_sizes[0], attn_vec_sizes[1], self.n_head * self.d_head))

        # linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out, training=training)

        if self.pre_lnorm:
            # residual connection
            outputs = [w + attn_out]
        else:
            # residual connection + layer normalization
            outputs = [self.layer_norm(w + attn_out)]

        if output_attentions:
            outputs.append(attn_prob)

        return outputs
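# Illustrative sketch, not part of the diff: what _rel_shift above does to a toy
# 2x3 score tensor (qlen=2, klen=3, bsz=1, n_head=1), traced step by step.
import tensorflow as tf

x = tf.reshape(tf.range(6.0), (2, 3, 1, 1))  # entries 0..5 so the movement is visible
x_size = [2, 3, 1, 1]
x = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])      # prepend a zero column
x = tf.reshape(x, [x_size[1] + 1, x_size[0], 1, 1])  # fold the pad diagonally into the rows
x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1])      # drop the first (all-pad) row
x = tf.reshape(x, x_size)
print(tf.squeeze(x))  # [[1. 2. 0.], [3. 4. 5.]]: row i ends up shifted left by (qlen - 1 - i)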
class TFRelPartialLearnableDecoderLayer(keras.layers.Layer):
    def __init__(
        self,
        n_head,
        d_model,
        d_head,
        d_inner,
        dropout,
        dropatt=0.0,
        pre_lnorm=False,
        r_w_bias=None,
        r_r_bias=None,
        layer_norm_epsilon=1e-5,
        init_std=0.02,
        output_attentions=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.dec_attn = TFRelPartialLearnableMultiHeadAttn(
            n_head,
            d_model,
            d_head,
            dropout,
            dropatt=dropatt,
            pre_lnorm=pre_lnorm,
            r_w_bias=r_w_bias,
            r_r_bias=r_r_bias,
            init_std=init_std,
            layer_norm_epsilon=layer_norm_epsilon,
            output_attentions=output_attentions,
            name="dec_attn",
        )
        self.pos_ff = TFPositionwiseFF(
            d_model,
            d_inner,
            dropout,
            pre_lnorm=pre_lnorm,
            init_std=init_std,
            layer_norm_epsilon=layer_norm_epsilon,
            name="pos_ff",
        )

    def call(self, dec_inp, r, dec_attn_mask, mems, head_mask, output_attentions, training=False):
        attn_outputs = self.dec_attn(dec_inp, r, dec_attn_mask, mems, head_mask, output_attentions, training=training)
        ff_output = self.pos_ff(attn_outputs[0], training=training)

        outputs = [ff_output] + attn_outputs[1:]

        return outputs


class TFTransfoEmbeddings(keras.layers.Layer):
    def __init__(self, vocab_size, emb_size, init_std, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.emb_size = emb_size
        self.init_std = init_std

    def build(self, input_shape):
        self.weight = self.add_weight(
            shape=(self.vocab_size, self.emb_size),
            initializer=get_initializer(self.init_std),
            name="embeddings",
        )

        super().build(input_shape)

    def call(self, inputs):
        return tf.gather(self.weight, inputs)


class TFAdaptiveEmbedding(keras.layers.Layer):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, init_std=0.02, sample_softmax=False, **kwargs):
        super().__init__(**kwargs)

        self.n_token = n_token
        self.d_embed = d_embed
        self.init_std = init_std

        self.cutoffs = cutoffs + [n_token]
        self.div_val = div_val
        self.d_proj = d_proj

        self.emb_scale = d_proj**0.5

        self.cutoff_ends = [0] + self.cutoffs

        self.emb_layers = []
        self.emb_projs = []

        if div_val == 1:
            raise NotImplementedError  # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.emb_layers.append(
                    TFTransfoEmbeddings(
                        r_idx - l_idx,
                        d_emb_i,
                        init_std,
                        name=f"emb_layers_._{i}",
                    )
                )

    def build(self, input_shape):
        for i in range(len(self.cutoffs)):
            d_emb_i = self.d_embed // (self.div_val**i)
            self.emb_projs.append(
                self.add_weight(
                    shape=(d_emb_i, self.d_proj),
                    initializer=get_initializer(self.init_std),
                    trainable=True,
                    name=f"emb_projs_._{i}",
                )
            )

        super().build(input_shape)

    def call(self, inp):
        if self.div_val == 1:
            raise NotImplementedError  # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
        else:
            inp_flat = tf.reshape(inp, (-1,))
            emb_flat = tf.zeros([shape_list(inp_flat)[0], self.d_proj])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]

                mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)

                inp_i = tf.boolean_mask(inp_flat, mask_i) - l_idx
                emb_i = self.emb_layers[i](inp_i)
                emb_i = tf.einsum("id,de->ie", emb_i, self.emb_projs[i])

                mask_idx = tf.where(mask_i)
                scatter = tf.scatter_nd(mask_idx, emb_i, shape_list(emb_flat))
                emb_flat = tf.cast(emb_flat, dtype=scatter.dtype)
                emb_flat += scatter

            embed_shape = shape_list(inp) + [self.d_proj]
            embed = tf.reshape(emb_flat, embed_shape)

            embed *= self.emb_scale

        return embed


@keras_serializable
class TFTransfoXLMainLayer(keras.layers.Layer):
    config_class = TransfoXLConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions
        self.return_dict = config.use_return_dict

        self.n_token = config.vocab_size

        self.d_embed = config.d_embed
        self.d_model = config.d_model
        self.n_head = config.n_head
        self.d_head = config.d_head
        self.untie_r = config.untie_r

        self.word_emb = TFAdaptiveEmbedding(
            config.vocab_size,
            config.d_embed,
            config.d_model,
            config.cutoffs,
            div_val=config.div_val,
            init_std=config.init_std,
            name="word_emb",
        )

        self.drop = keras.layers.Dropout(config.dropout)

        self.n_layer = config.n_layer
        self.mem_len = config.mem_len
        self.attn_type = config.attn_type

        self.layers = []
        if config.attn_type == 0:  # the default attention
            for i in range(config.n_layer):
                self.layers.append(
                    TFRelPartialLearnableDecoderLayer(
                        config.n_head,
                        config.d_model,
                        config.d_head,
                        config.d_inner,
                        config.dropout,
                        dropatt=config.dropatt,
                        pre_lnorm=config.pre_lnorm,
                        r_w_bias=None if self.untie_r else self.r_w_bias,
                        r_r_bias=None if self.untie_r else self.r_r_bias,
                        layer_norm_epsilon=config.layer_norm_epsilon,
                        init_std=config.init_std,
                        output_attentions=self.output_attentions,
                        name=f"layers_._{i}",
                    )
                )
        else:  # learnable embeddings and absolute embeddings
            raise NotImplementedError  # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint

        self.same_length = config.same_length
        self.clamp_len = config.clamp_len

        if self.attn_type == 0:  # default attention
            self.pos_emb = TFPositionalEmbedding(self.d_model, name="pos_emb")
        else:  # learnable embeddings and absolute embeddings
            raise NotImplementedError  # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint

    def build(self, input_shape):
        if not self.untie_r:
            self.r_w_bias = self.add_weight(
                shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_w_bias"
            )
            self.r_r_bias = self.add_weight(
                shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_r_bias"
            )
        super().build(input_shape)

    def get_input_embeddings(self):
        return self.word_emb

    def set_input_embeddings(self, value):
        raise NotImplementedError

    def backward_compatible(self):
        self.sample_softmax = -1

    def reset_memory_length(self, mem_len):
        self.mem_len = mem_len

    def _prune_heads(self, heads):
        raise NotImplementedError

    def init_mems(self, bsz):
        if self.mem_len > 0:
            mems = []
            for i in range(self.n_layer):
                empty = tf.zeros([self.mem_len, bsz, self.d_model])
                mems.append(empty)

            return mems
        else:
            return None

    def _update_mems(self, hids, mems, mlen, qlen):
        # does not deal with None
        if mems is None:
            return None

        # mems is not None
        assert len(hids) == len(mems), "len(hids) != len(mems)"

        # There are `mlen + qlen` steps that can be cached into mems
        new_mems = []
        end_idx = mlen + tf.math.maximum(0, qlen)
        beg_idx = tf.math.maximum(0, end_idx - tf.convert_to_tensor(self.mem_len))
        for i in range(len(hids)):
            mems[i] = tf.cast(mems[i], dtype=hids[i].dtype)
            cat = tf.concat([mems[i], hids[i]], axis=0)
            tf.stop_gradient(cat)
            new_mems.append(cat[beg_idx:end_idx])

        return new_mems

    @unpack_inputs
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        mems: List[tf.Tensor] | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: bool = False,
    ):
        # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
        # so we transpose here from shape [bsz, len] to shape [len, bsz]
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_ids = tf.transpose(input_ids, perm=(1, 0))
            qlen, bsz = shape_list(input_ids)
        elif inputs_embeds is not None:
            inputs_embeds = tf.transpose(inputs_embeds, perm=(1, 0, 2))
            qlen, bsz = shape_list(inputs_embeds)[:2]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if mems is None:
            mems = self.init_mems(bsz)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
        # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
        if head_mask is not None:
            raise NotImplementedError
        else:
            head_mask = [None] * self.n_layer

        if inputs_embeds is not None:
            word_emb = inputs_embeds
        else:
            word_emb = self.word_emb(input_ids)

        mlen = shape_list(mems[0])[0] if mems is not None else 0
        klen = mlen + qlen

        # Compute decoder attention mask
        all_ones = tf.ones([qlen, klen], dtype=tf.int32)
        upper_mask = 1 - tf.linalg.band_part(tf.ones([qlen, klen], dtype=tf.int32), -1, mlen)
        if self.same_length:
            mask_len = klen - self.mem_len
            mask_shift_len = qlen - tf.nn.relu(mask_len)  # Lazy clamping of negatives to zero

            # Use an indicator variable instead of a conditional to keep the compiler happy
            lower_mask = tf.linalg.band_part(all_ones, -1, 0) - (
                tf.linalg.band_part(all_ones, mask_shift_len - 1, 0) * tf.cast(mask_shift_len != 0, tf.int32)
            )
            dec_attn_mask = upper_mask + lower_mask
        else:
            dec_attn_mask = upper_mask

        hids = []
        attentions = [] if output_attentions else None
        if self.attn_type == 0:  # default
            pos_seq = tf.range(klen - 1, -1, -1.0)
            if self.clamp_len > 0:
                pos_seq = tf.minimum(pos_seq, self.clamp_len)
            pos_emb = self.pos_emb(pos_seq)

            core_out = self.drop(word_emb, training=training)
            pos_emb = self.drop(pos_emb, training=training)

            for i, layer in enumerate(self.layers):
                hids.append(core_out)
                mems_i = None if mems is None else mems[i]
                layer_outputs = layer(
                    core_out,
                    pos_emb,
                    dec_attn_mask,
                    mems_i,
                    head_mask[i],
                    output_attentions,
                    training=training,
                )
                core_out = layer_outputs[0]
                if output_attentions:
                    attentions.append(layer_outputs[1])
        else:  # learnable embeddings and absolute embeddings
            raise NotImplementedError  # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint

        core_out = self.drop(core_out, training=training)

        new_mems = self._update_mems(hids, mems, mlen, qlen)

        # We transpose back here to shape [bsz, len, hidden_dim]
        core_out = tf.transpose(core_out, perm=(1, 0, 2))

        if output_hidden_states:
            # Transpose to library standard shape [bsz, len, hidden_dim] and add last layer
            hids = tuple(tf.transpose(t, perm=(1, 0, 2)) for t in hids)
            hids = hids + (core_out,)
        else:
            hids = None
        if output_attentions:
            # Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len]
            attentions = tuple(tf.transpose(t, perm=(2, 3, 0, 1)) for t in attentions)

        if not return_dict:
            return tuple(v for v in [core_out, new_mems, hids, attentions] if v is not None)

        return TFTransfoXLModelOutput(
            last_hidden_state=core_out,
            mems=new_mems,
            hidden_states=hids,
            attentions=attentions,
        )
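# Illustrative sketch, not part of the diff: the sliding-window index math used
# by _update_mems above, with hypothetical sizes.
mem_len, mlen, qlen = 4, 4, 3

end_idx = mlen + max(0, qlen)        # 7: mlen cached steps plus qlen fresh ones
beg_idx = max(0, end_idx - mem_len)  # 3: keep only the newest mem_len steps
# After concatenating mems (4 steps) with the new hidden states (3 steps),
# cat[beg_idx:end_idx] == cat[3:7] retains exactly the last mem_len positions.
print(beg_idx, end_idx)  # 3 7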
class TFTransfoXLPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = TransfoXLConfig
    base_model_prefix = "transformer"


@dataclass
class TFTransfoXLModelOutput(ModelOutput):
    """
    Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).

    Args:
        last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        mems (`List[tf.Tensor]` of length `config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
            input) to speed up sequential decoding. The token ids which have their past given to this model should not
            be passed as input ids as they have already been computed.
        hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    last_hidden_state: tf.Tensor = None
    mems: List[tf.Tensor] = None
    hidden_states: Tuple[tf.Tensor] | None = None
    attentions: Tuple[tf.Tensor] | None = None


@dataclass
class TFTransfoXLLMHeadModelOutput(ModelOutput):
    """
    Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).

    Args:
        losses (`tf.Tensor` of shape *(batch_size, sequence_length-1)*, *optional*, returned when `labels` is provided):
            Language modeling losses (not reduced).
        prediction_scores (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token after SoftMax).
        mems (`List[tf.Tensor]` of length `config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
            input) to speed up sequential decoding. The token ids which have their past given to this model should not
            be passed as input ids as they have already been computed.
        hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    prediction_scores: tf.Tensor = None
    mems: List[tf.Tensor] = None
    hidden_states: Tuple[tf.Tensor] | None = None
    attentions: Tuple[tf.Tensor] | None = None


@dataclass
class TFTransfoXLSequenceClassifierOutputWithPast(ModelOutput):
    """
    Base class for outputs of sentence classification models.

    Args:
        loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        mems (`List[tf.Tensor]` of length `config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
            input) to speed up sequential decoding. The token ids which have their past given to this model should not
            be passed as input ids as they have already been computed.
        hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: tf.Tensor | None = None
    logits: tf.Tensor = None
    mems: List[tf.Tensor] = None
    hidden_states: Tuple[tf.Tensor] | None = None
    attentions: Tuple[tf.Tensor] | None = None


TRANSFO_XL_START_DOCSTRING = r"""

    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
    behavior.

    <Tip>

    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
    positional argument:

    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
      `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
      `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`

    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!

    </Tip>

    Parameters:
        config ([`TransfoXLConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

TRANSFO_XL_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
            [`PreTrainedTokenizer.encode`] for details.

            [What are input IDs?](../glossary#input-ids)
        mems (`List[tf.Tensor]` of length `config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
            `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
            given to this model should not be passed as `input_ids` as they have already been computed.
        head_mask (`tf.Tensor` or `Numpy array` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        inputs_embeds (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
            config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
            eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""


@add_start_docstrings(
    "The bare Transformer-XL Model transformer outputting raw hidden-states without any specific head on top.",
    TRANSFO_XL_START_DOCSTRING,
)
class TFTransfoXLModel(TFTransfoXLPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.transformer = TFTransfoXLMainLayer(config, name="transformer")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFTransfoXLModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        mems: List[tf.Tensor] | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        training: bool = False,
    ) -> TFTransfoXLModelOutput | Tuple[tf.Tensor]:
        outputs = self.transformer(
            input_ids=input_ids,
            mems=mems,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return outputs
891 |
+
@add_start_docstrings(
|
892 |
+
"""
|
893 |
+
The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive
|
894 |
+
input embeddings)
|
895 |
+
""",
|
896 |
+
TRANSFO_XL_START_DOCSTRING,
|
897 |
+
)
|
898 |
+
class TFTransfoXLLMHeadModel(TFTransfoXLPreTrainedModel):
|
899 |
+
def __init__(self, config):
|
900 |
+
super().__init__(config)
|
901 |
+
self.transformer = TFTransfoXLMainLayer(config, name="transformer")
|
902 |
+
self.sample_softmax = config.sample_softmax
|
903 |
+
assert self.sample_softmax <= 0, (
|
904 |
+
"Sampling from the softmax is not implemented yet. Please look at issue: #3310:"
|
905 |
+
" https://github.com/huggingface/transformers/issues/3310"
|
906 |
+
)
|
907 |
+
|
908 |
+
self.crit = TFAdaptiveSoftmaxMask(
|
909 |
+
config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val, name="crit"
|
910 |
+
)
|
911 |
+
|
912 |
+
def _resize_token_embeddings(self, new_num_tokens):
|
913 |
+
raise NotImplementedError()
|
914 |
+
|
915 |
+
def get_output_embeddings(self):
|
916 |
+
"""Double-check if you are using adaptive softmax."""
|
917 |
+
if len(self.crit.out_layers) > 0:
|
918 |
+
return self.crit.out_layers[-1]
|
919 |
+
return None
|
920 |
+
|
921 |
+
def reset_memory_length(self, mem_len):
|
922 |
+
self.transformer.reset_memory_length(mem_len)
|
923 |
+
|
924 |
+
def init_mems(self, bsz):
|
925 |
+
return self.transformer.init_mems(bsz)
|
926 |
+
|
927 |
+
@unpack_inputs
|
928 |
+
@add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
|
929 |
+
@add_code_sample_docstrings(
|
930 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
931 |
+
output_type=TFTransfoXLLMHeadModelOutput,
|
932 |
+
config_class=_CONFIG_FOR_DOC,
|
933 |
+
)
|
934 |
+
def call(
|
935 |
+
self,
|
936 |
+
input_ids: TFModelInputType | None = None,
|
937 |
+
mems: List[tf.Tensor] | None = None,
|
938 |
+
head_mask: np.ndarray | tf.Tensor | None = None,
|
939 |
+
inputs_embeds: np.ndarray | tf.Tensor | None = None,
|
940 |
+
output_attentions: bool | None = None,
|
941 |
+
output_hidden_states: bool | None = None,
|
942 |
+
return_dict: bool | None = None,
|
943 |
+
labels: np.ndarray | tf.Tensor | None = None,
|
944 |
+
training: bool = False,
|
945 |
+
) -> TFTransfoXLLMHeadModelOutput | Tuple[tf.Tensor]:
|
946 |
+
if input_ids is not None:
|
947 |
+
bsz, tgt_len = shape_list(input_ids)[:2]
|
948 |
+
else:
|
949 |
+
bsz, tgt_len = shape_list(inputs_embeds)[:2]
|
950 |
+
|
951 |
+
transformer_outputs = self.transformer(
|
952 |
+
input_ids,
|
953 |
+
mems,
|
954 |
+
head_mask,
|
955 |
+
inputs_embeds,
|
956 |
+
output_attentions,
|
957 |
+
output_hidden_states,
|
958 |
+
return_dict,
|
959 |
+
training=training,
|
960 |
+
)
|
961 |
+
|
962 |
+
last_hidden = transformer_outputs[0]
|
963 |
+
pred_hid = last_hidden[:, -tgt_len:]
|
964 |
+
|
965 |
+
softmax_output = self.crit(pred_hid, labels, training=training)
|
966 |
+
prediction_scores = softmax_output if labels is None else ()
|
967 |
+
|
968 |
+
if not return_dict:
|
969 |
+
return (prediction_scores,) + transformer_outputs[1:]
|
970 |
+
|
971 |
+
return TFTransfoXLLMHeadModelOutput(
|
972 |
+
prediction_scores=prediction_scores,
|
973 |
+
mems=transformer_outputs.mems,
|
974 |
+
hidden_states=transformer_outputs.hidden_states,
|
975 |
+
attentions=transformer_outputs.attentions,
|
976 |
+
)
|
977 |
+
|
978 |
+
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **model_kwargs):
|
979 |
+
inputs = {}
|
980 |
+
|
981 |
+
# if past is defined in model kwargs then use it for faster decoding
|
982 |
+
if past_key_values:
|
983 |
+
input_ids = tf.expand_dims(input_ids[:, -1], axis=-1)
|
984 |
+
else:
|
985 |
+
input_ids = input_ids
|
986 |
+
|
987 |
+
return inputs
|
988 |
+
|
989 |
+
# Adapted from the torch tie_weights function
|
990 |
+
def tf_to_pt_weight_rename(self, tf_weight):
|
991 |
+
if self.config.tie_word_embeddings and "crit.out_layers" in tf_weight:
|
992 |
+
return tf_weight, tf_weight.replace("crit.out_layers", "transformer.word_emb.emb_layers")
|
993 |
+
elif self.config.tie_projs and "crit.out_projs" in tf_weight:
|
994 |
+
for i, tie_proj in enumerate(self.config.tie_projs):
|
995 |
+
if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
|
996 |
+
# self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
|
997 |
+
return tf_weight, tf_weight.replace(f"crit.out_projs.{i}", "transformer.word_emb.emb_projs.0")
|
998 |
+
elif tie_proj and self.config.div_val != 1:
|
999 |
+
# self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
|
1000 |
+
return tf_weight, tf_weight.replace("crit.out_projs", "transformer.word_emb.emb_projs")
|
1001 |
+
else:
|
1002 |
+
return (tf_weight,)
|
1003 |
+
|
1004 |
+
|
1005 |
+
@add_start_docstrings(
|
1006 |
+
"""
|
1007 |
+
The Transfo XL Model transformer with a sequence classification head on top (linear layer).
|
1008 |
+
|
1009 |
+
[`TFTransfoXLForSequenceClassification`] uses the last token in order to do the classification, as other causal
|
1010 |
+
models (e.g. GPT-1,GPT-2) do.
|
1011 |
+
|
1012 |
+
Since it does classification on the last token, it requires to know the position of the last token. If a
|
1013 |
+
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
|
1014 |
+
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
|
1015 |
+
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
|
1016 |
+
each row of the batch).
|
1017 |
+
""",
|
1018 |
+
TRANSFO_XL_START_DOCSTRING,
|
1019 |
+
)
|
1020 |
+
class TFTransfoXLForSequenceClassification(TFTransfoXLPreTrainedModel, TFSequenceClassificationLoss):
|
1021 |
+
def __init__(self, config, *inputs, **kwargs):
|
1022 |
+
super().__init__(config, *inputs, **kwargs)
|
1023 |
+
self.num_labels = config.num_labels
|
1024 |
+
self.score = keras.layers.Dense(
|
1025 |
+
config.num_labels,
|
1026 |
+
kernel_initializer=get_initializer(config.init_range),
|
1027 |
+
name="score",
|
1028 |
+
use_bias=False,
|
1029 |
+
)
|
1030 |
+
self.transformer = TFTransfoXLMainLayer(config, name="transformer")
|
1031 |
+
|
1032 |
+
def get_output_embeddings(self):
|
1033 |
+
# Remove after transformers v4.32. Fix this model's `test_model_common_attributes` test too.
|
1034 |
+
logger.warning(
|
1035 |
+
"Sequence classification models do not have output embeddings. `.get_output_embeddings` will be removed "
|
1036 |
+
"in transformers v4.32."
|
1037 |
+
)
|
1038 |
+
return self.transformer.word_emb
|
1039 |
+
|
1040 |
+
@unpack_inputs
|
1041 |
+
@add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
|
1042 |
+
@add_code_sample_docstrings(
|
1043 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
1044 |
+
output_type=TFTransfoXLSequenceClassifierOutputWithPast,
|
1045 |
+
config_class=_CONFIG_FOR_DOC,
|
1046 |
+
)
|
1047 |
+
def call(
|
1048 |
+
self,
|
1049 |
+
input_ids: TFModelInputType | None = None,
|
1050 |
+
mems: List[tf.Tensor] | None = None,
|
1051 |
+
head_mask: np.ndarray | tf.Tensor | None = None,
|
1052 |
+
inputs_embeds: np.ndarray | tf.Tensor | None = None,
|
1053 |
+
output_attentions: Optional[bool] = None,
|
1054 |
+
output_hidden_states: Optional[bool] = None,
|
1055 |
+
return_dict: Optional[bool] = None,
|
1056 |
+
labels: np.ndarray | tf.Tensor | None = None,
|
1057 |
+
training: Optional[bool] = False,
|
1058 |
+
) -> Union[Tuple, TFTransfoXLSequenceClassifierOutputWithPast]:
|
1059 |
+
r"""
|
1060 |
+
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
1061 |
+
Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
|
1062 |
+
config.vocab_size - 1]`.
|
1063 |
+
"""
|
1064 |
+
transformer_outputs = self.transformer(
|
1065 |
+
input_ids=input_ids,
|
1066 |
+
mems=mems,
|
1067 |
+
head_mask=head_mask,
|
1068 |
+
inputs_embeds=inputs_embeds,
|
1069 |
+
output_attentions=output_attentions,
|
1070 |
+
output_hidden_states=output_hidden_states,
|
1071 |
+
return_dict=return_dict,
|
1072 |
+
training=training,
|
1073 |
+
)
|
1074 |
+
|
1075 |
+
hidden_states = transformer_outputs[0]
|
1076 |
+
logits = self.score(hidden_states)
|
1077 |
+
in_logits = None
|
1078 |
+
if self.config.pad_token_id is None:
|
1079 |
+
sequence_lengths = -1
|
1080 |
+
else:
|
1081 |
+
if input_ids is not None:
|
1082 |
+
sequence_lengths = (
|
1083 |
+
tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)
|
1084 |
+
- 1
|
1085 |
+
)
|
1086 |
+
sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1)
|
1087 |
+
in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
|
1088 |
+
else:
|
1089 |
+
sequence_lengths = -1
|
1090 |
+
logger.warning(
|
1091 |
+
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
|
1092 |
+
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
|
1093 |
+
)
|
1094 |
+
loss = None
|
1095 |
+
|
1096 |
+
if labels is not None:
|
1097 |
+
if input_ids is not None:
|
1098 |
+
batch_size, sequence_length = shape_list(input_ids)[:2]
|
1099 |
+
else:
|
1100 |
+
batch_size, sequence_length = shape_list(inputs_embeds)[:2]
|
1101 |
+
assert (
|
1102 |
+
self.config.pad_token_id is not None or batch_size == 1
|
1103 |
+
), "Cannot handle batch sizes > 1 if no padding token is defined."
|
1104 |
+
|
1105 |
+
if not tf.is_tensor(sequence_lengths):
|
1106 |
+
in_logits = logits[0:batch_size, sequence_lengths]
|
1107 |
+
|
1108 |
+
loss = self.hf_compute_loss(tf.reshape(labels, [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels]))
|
1109 |
+
|
1110 |
+
pooled_logits = in_logits if in_logits is not None else logits
|
1111 |
+
|
1112 |
+
if not return_dict:
|
1113 |
+
output = (pooled_logits,) + transformer_outputs[1:]
|
1114 |
+
return ((loss,) + output) if loss is not None else output
|
1115 |
+
|
1116 |
+
return TFTransfoXLSequenceClassifierOutputWithPast(
|
1117 |
+
loss=loss,
|
1118 |
+
logits=pooled_logits,
|
1119 |
+
mems=transformer_outputs.mems,
|
1120 |
+
hidden_states=transformer_outputs.hidden_states,
|
1121 |
+
attentions=transformer_outputs.attentions,
|
1122 |
+
)
|
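
A minimal usage sketch for the classes above (an assumption-laden illustration, not part of the upstream file): it assumes an older transformers release that still ships these deprecated TF classes and the `transfo-xl/transfo-xl-wt103` checkpoint named in this file. The point is that the `mems` returned by one call are fed back into the next call, so earlier segments never need to be re-encoded.

# Sketch only: segment-level recurrence via `mems`.
import tensorflow as tf
from transformers import AutoTokenizer, TFTransfoXLLMHeadModel

tokenizer = AutoTokenizer.from_pretrained("transfo-xl/transfo-xl-wt103")
model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl/transfo-xl-wt103")

text = "The Transformer-XL caches hidden states between segments of text"
ids = tokenizer(text, return_tensors="tf").input_ids

# First segment: no memory yet; the model returns fresh `mems`.
first = model(input_ids=ids[:, :4], mems=None)
# Second segment: pass the cached states instead of re-encoding the prefix.
second = model(input_ids=ids[:, 4:], mems=first.mems)
print(second.prediction_scores.shape)  # (batch, segment_len, vocab_size)
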
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py
ADDED
@@ -0,0 +1,179 @@
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A TF 2.0 Adaptive Softmax for Transformer XL model.
"""


import tensorflow as tf

from ....modeling_tf_utils import keras
from ....tf_utils import shape_list


class TFAdaptiveSoftmaxMask(keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
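
A small sketch of what `TFAdaptiveSoftmaxMask` computes, using illustrative (assumed) cutoff values rather than any particular checkpoint's configuration: the `cutoffs` partition the vocabulary into a frequent-token head shortlist plus tail clusters, and a tail token's log-probability factorizes into the cluster log-probability plus the within-cluster log-probability.

# Sketch only: cutoff bookkeeping, mirroring `self.cutoff_ends` above.
cutoffs, vocab_size = [20000, 40000], 260000  # assumed example values
cutoff_ends = [0] + cutoffs + [vocab_size]

def cluster_of(token_id: int) -> int:
    """Return i such that cutoff_ends[i] <= token_id < cutoff_ends[i + 1]."""
    for i in range(len(cutoff_ends) - 1):
        if cutoff_ends[i] <= token_id < cutoff_ends[i + 1]:
            return i
    raise ValueError("token id out of range")

print(cluster_of(15))      # 0 -> head shortlist, scored directly
print(cluster_of(25000))   # 1 -> first tail cluster
print(cluster_of(100000))  # 2 -> second tail cluster
# For a tail token: log p(token) = log p(cluster | hidden) + log p(token | cluster)
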
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
ADDED
@@ -0,0 +1,1295 @@
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PyTorch Transformer XL model. Adapted from https://github.com/kimiyoung/transformer-xl. In particular
https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
"""
import warnings
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ....modeling_utils import PreTrainedModel
from ....utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
)
from .configuration_transfo_xl import TransfoXLConfig
from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "transfo-xl/transfo-xl-wt103"
_CONFIG_FOR_DOC = "TransfoXLConfig"


from .._archive_maps import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


def build_tf_to_pytorch_map(model, config):
    """
    A map of modules from TF to PyTorch. This time I use a map to keep the PyTorch model as identical to the original
    PyTorch model as possible.
    """
    tf_to_pt_map = {}

    if hasattr(model, "transformer"):
        # We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
        tf_to_pt_map.update(
            {
                "transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
                "transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias,
            }
        )
        for i, (out_l, proj_l, tie_proj) in enumerate(
            zip(model.crit.out_layers, model.crit.out_projs, config.tie_projs)
        ):
            layer_str = f"transformer/adaptive_softmax/cutoff_{i}/"
            if config.tie_word_embeddings:
                tf_to_pt_map.update({layer_str + "b": out_l.bias})
            else:
                raise NotImplementedError
                # I don't think this is implemented in the TF code
                tf_to_pt_map.update({layer_str + "lookup_table": out_l.weight, layer_str + "b": out_l.bias})
            if not tie_proj:
                tf_to_pt_map.update({layer_str + "proj": proj_l})
        # Now load the rest of the transformer
        model = model.transformer

    # Embeddings
    for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
        layer_str = f"transformer/adaptive_embed/cutoff_{i}/"
        tf_to_pt_map.update({layer_str + "lookup_table": embed_l.weight, layer_str + "proj_W": proj_l})

    # Transformer blocks
    for i, b in enumerate(model.layers):
        layer_str = f"transformer/layer_{i}/"
        tf_to_pt_map.update(
            {
                layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
                layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
                layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
                layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
                layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
                layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
                layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
                layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
                layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
                layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
                layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
            }
        )

    # Relative positioning biases
    if config.untie_r:
        r_r_list = []
        r_w_list = []
        for b in model.layers:
            r_r_list.append(b.dec_attn.r_r_bias)
            r_w_list.append(b.dec_attn.r_w_bias)
    else:
        r_r_list = [model.r_r_bias]
        r_w_list = [model.r_w_bias]
    tf_to_pt_map.update({"transformer/r_r_bias": r_r_list, "transformer/r_w_bias": r_w_list})
    return tf_to_pt_map


def load_tf_weights_in_transfo_xl(model, config, tf_path):
    """Load tf checkpoints in a pytorch model"""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = build_tf_to_pytorch_map(model, config)

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        tf_weights[name] = array

    for name, pointer in tf_to_pt_map.items():
        assert name in tf_weights
        array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
        # which are not required for using pretrained model
        if "kernel" in name or "proj" in name:
            array = np.transpose(array)
        if ("r_r_bias" in name or "r_w_bias" in name) and len(pointer) > 1:
            # Here we will split the TF weights
            assert len(pointer) == array.shape[0]
            for i, p_i in enumerate(pointer):
                arr_i = array[i, ...]
                try:
                    assert p_i.shape == arr_i.shape
                except AssertionError as e:
                    e.args += (p_i.shape, arr_i.shape)
                    raise
                logger.info(f"Initialize PyTorch weight {name} for layer {i}")
                p_i.data = torch.from_numpy(arr_i)
        else:
            try:
                assert (
                    pointer.shape == array.shape
                ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            logger.info(f"Initialize PyTorch weight {name}")
            pointer.data = torch.from_numpy(array)
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/Adam", None)
        tf_weights.pop(name + "/Adam_1", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model


class PositionalEmbedding(nn.Module):
    def __init__(self, demb):
        super().__init__()

        self.demb = demb

        inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
        self.register_buffer("inv_freq", inv_freq)

    def forward(self, pos_seq, bsz=None):
        sinusoid_inp = torch.outer(pos_seq, self.inv_freq)
        pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)

        if bsz is not None:
            return pos_emb[:, None, :].expand(-1, bsz, -1)
        else:
            return pos_emb[:, None, :]

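
A standalone sketch of the `PositionalEmbedding` math above (assumed inputs; it simply mirrors the module body outside the class): Transformer-XL evaluates the sinusoids on a descending position sequence, so the resulting table encodes relative distances rather than absolute positions.

# Sketch only: sinusoidal table for relative distances 3..0.
import torch

demb = 8
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
pos_seq = torch.arange(3, -1, -1.0)            # [3., 2., 1., 0.]
sinusoid_inp = torch.outer(pos_seq, inv_freq)  # (4, demb // 2)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
print(pos_emb.shape)  # torch.Size([4, 8]); row k encodes distance 3 - k
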
class PositionwiseFF(nn.Module):
    def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5):
        super().__init__()

        self.d_model = d_model
        self.d_inner = d_inner
        self.dropout = dropout

        self.CoreNet = nn.Sequential(
            nn.Linear(d_model, d_inner),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Linear(d_inner, d_model),
            nn.Dropout(dropout),
        )

        self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)

        self.pre_lnorm = pre_lnorm

    def forward(self, inp):
        if self.pre_lnorm:
            # layer normalization + positionwise feed-forward
            core_out = self.CoreNet(self.layer_norm(inp))

            # residual connection
            output = core_out + inp
        else:
            # positionwise feed-forward
            core_out = self.CoreNet(inp)

            # residual connection + layer normalization
            output = self.layer_norm(inp + core_out)

        return output


class RelPartialLearnableMultiHeadAttn(nn.Module):
    def __init__(
        self,
        n_head,
        d_model,
        d_head,
        dropout,
        dropatt=0,
        pre_lnorm=False,
        r_r_bias=None,
        r_w_bias=None,
        layer_norm_epsilon=1e-5,
    ):
        super().__init__()

        self.n_head = n_head
        self.d_model = d_model
        self.d_head = d_head
        self.dropout = dropout

        self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)

        self.drop = nn.Dropout(dropout)
        self.dropatt = nn.Dropout(dropatt)
        self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)

        self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)

        self.scale = 1 / (d_head**0.5)

        self.pre_lnorm = pre_lnorm

        if r_r_bias is None or r_w_bias is None:  # Biases are not shared
            self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
            self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        else:
            self.r_r_bias = r_r_bias
            self.r_w_bias = r_w_bias

        self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)

    def _rel_shift(self, x):
        zero_pad_shape = (x.size(0), 1) + x.size()[2:]
        zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
        x_padded = torch.cat([zero_pad, x], dim=1)

        x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
        x_padded = x_padded.view(*x_padded_shape)

        x = x_padded[1:].view_as(x)

        return x

    def forward(self, w, r, attn_mask=None, mems=None, head_mask=None, output_attentions=False):
        qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)

        if mems is not None:
            cat = torch.cat([mems, w], 0)
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(cat))
            else:
                w_heads = self.qkv_net(cat)
            r_head_k = self.r_net(r)

            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
            w_head_q = w_head_q[-qlen:]
        else:
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(w))
            else:
                w_heads = self.qkv_net(w)
            r_head_k = self.r_net(r)

            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)

        klen = w_head_k.size(0)

        w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)  # qlen x bsz x n_head x d_head
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)  # klen x bsz x n_head x d_head
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)  # klen x bsz x n_head x d_head

        r_head_k = r_head_k.view(rlen, self.n_head, self.d_head)  # rlen x n_head x d_head

        # compute attention score
        rw_head_q = w_head_q + self.r_w_bias  # qlen x bsz x n_head x d_head
        AC = torch.einsum("ibnd,jbnd->ijbn", (rw_head_q, w_head_k))  # qlen x klen x bsz x n_head

        rr_head_q = w_head_q + self.r_r_bias
        BD = torch.einsum("ibnd,jnd->ijbn", (rr_head_q, r_head_k))  # qlen x klen x bsz x n_head
        BD = self._rel_shift(BD)

        # [qlen x klen x bsz x n_head]
        attn_score = AC + BD
        attn_score.mul_(self.scale)

        mask_value = torch.finfo(attn_score.dtype).min

        # compute attention probability
        if attn_mask is not None and torch.sum(attn_mask).item():
            attn_mask = attn_mask == 1  # Switch to bool
            if attn_mask.dim() == 2:
                attn_score = (
                    attn_score.float().masked_fill(attn_mask[None, :, :, None], mask_value).type_as(attn_score)
                )
            elif attn_mask.dim() == 3:
                attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], mask_value).type_as(attn_score)

        # [qlen x klen x bsz x n_head]
        attn_prob = nn.functional.softmax(attn_score, dim=1)
        attn_prob = self.dropatt(attn_prob)

        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * head_mask

        # compute attention vector
        attn_vec = torch.einsum("ijbn,jbnd->ibnd", (attn_prob, w_head_v))

        # [qlen x bsz x n_head x d_head]
        attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)

        # linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)

        if self.pre_lnorm:
            # residual connection
            outputs = [w + attn_out]
        else:
            # residual connection + layer normalization
            outputs = [self.layer_norm(w + attn_out)]

        if output_attentions:
            outputs.append(attn_prob)

        return outputs

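
A tiny numeric sketch of `_rel_shift` above (assumed 2-D input; the real call operates on `(qlen, klen, bsz, n_head)` scores, but the shift acts on the first two dimensions): row `i` of the output is row `i` of the input shifted left by `qlen - 1 - i`, and the trailing junk entries are discarded later by the causal attention mask.

# Sketch only: the zero-pad-and-reshape relative shift on a 3x4 score matrix.
import torch

x = torch.arange(12.0).view(3, 4)  # (qlen=3, klen=4) relative scores
zero_pad = torch.zeros(3, 1)
x_padded = torch.cat([zero_pad, x], dim=1).view(5, 3)
shifted = x_padded[1:].view_as(x)
print(shifted)
# tensor([[ 2.,  3.,  0.,  4.],
#         [ 5.,  6.,  7.,  0.],
#         [ 8.,  9., 10., 11.]])
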
class RelPartialLearnableDecoderLayer(nn.Module):
    def __init__(self, n_head, d_model, d_head, d_inner, dropout, layer_norm_epsilon=1e-5, **kwargs):
        super().__init__()

        self.dec_attn = RelPartialLearnableMultiHeadAttn(
            n_head, d_model, d_head, dropout, layer_norm_epsilon=layer_norm_epsilon, **kwargs
        )
        self.pos_ff = PositionwiseFF(
            d_model, d_inner, dropout, pre_lnorm=kwargs.get("pre_lnorm"), layer_norm_epsilon=layer_norm_epsilon
        )

    def forward(self, dec_inp, r, dec_attn_mask=None, mems=None, head_mask=None, output_attentions=False):
        attn_outputs = self.dec_attn(
            dec_inp,
            r,
            attn_mask=dec_attn_mask,
            mems=mems,
            head_mask=head_mask,
            output_attentions=output_attentions,
        )
        ff_output = self.pos_ff(attn_outputs[0])

        outputs = [ff_output] + attn_outputs[1:]

        return outputs


class AdaptiveEmbedding(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed

        self.cutoffs = cutoffs + [n_token]
        self.div_val = div_val
        self.d_proj = d_proj

        self.emb_scale = d_proj**0.5

        self.cutoff_ends = [0] + self.cutoffs

        self.emb_layers = nn.ModuleList()
        self.emb_projs = nn.ParameterList()
        if div_val == 1:
            self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))
            if d_proj != d_embed:
                self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
                self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))

    def forward(self, inp):
        if self.div_val == 1:
            embed = self.emb_layers[0](inp)
            if self.d_proj != self.d_embed:
                embed = nn.functional.linear(embed, self.emb_projs[0])
        else:
            param = next(self.parameters())
            inp_flat = inp.view(-1)
            emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]

                mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
                indices_i = mask_i.nonzero().squeeze()

                if indices_i.numel() == 0:
                    continue

                inp_i = inp_flat.index_select(0, indices_i) - l_idx
                emb_i = self.emb_layers[i](inp_i)
                emb_i = nn.functional.linear(emb_i, self.emb_projs[i])

                emb_flat.index_copy_(0, indices_i, emb_i)

            embed_shape = inp.size() + (self.d_proj,)
            embed = emb_flat.view(embed_shape)

        embed.mul_(self.emb_scale)

        return embed

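
A sketch of the size bookkeeping in `AdaptiveEmbedding` above, with illustrative (assumed) hyperparameters rather than values taken from this diff: with `div_val=4`, each successive vocabulary cluster stores a 4x narrower embedding table, and every cluster is projected back up to `d_proj` before use.

# Sketch only: per-cluster embedding dimensions under div_val scaling.
d_embed, d_proj, div_val = 1024, 1024, 4  # assumed example values
cutoffs, n_token = [20000, 40000, 200000], 260000
cutoff_ends = [0] + cutoffs + [n_token]

for i in range(len(cutoff_ends) - 1):
    l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]
    d_emb_i = d_embed // (div_val**i)
    print(f"cluster {i}: tokens [{l_idx}, {r_idx}), embedding dim {d_emb_i}")
# cluster 0: tokens [0, 20000), embedding dim 1024
# cluster 1: tokens [20000, 40000), embedding dim 256
# cluster 2: tokens [40000, 200000), embedding dim 64
# cluster 3: tokens [200000, 260000), embedding dim 16
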
class TransfoXLPreTrainedModel(PreTrainedModel):
|
458 |
+
"""
|
459 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
460 |
+
models.
|
461 |
+
"""
|
462 |
+
|
463 |
+
config_class = TransfoXLConfig
|
464 |
+
load_tf_weights = load_tf_weights_in_transfo_xl
|
465 |
+
base_model_prefix = "transformer"
|
466 |
+
|
467 |
+
def _init_weight(self, weight):
|
468 |
+
if self.config.init == "uniform":
|
469 |
+
nn.init.uniform_(weight, -self.config.init_range, self.config.init_range)
|
470 |
+
elif self.config.init == "normal":
|
471 |
+
nn.init.normal_(weight, 0.0, self.config.init_std)
|
472 |
+
|
473 |
+
def _init_bias(self, bias):
|
474 |
+
nn.init.constant_(bias, 0.0)
|
475 |
+
|
476 |
+
def _init_weights(self, m):
|
477 |
+
"""Initialize the weights."""
|
478 |
+
classname = m.__class__.__name__
|
479 |
+
if classname.find("Linear") != -1:
|
480 |
+
if hasattr(m, "weight") and m.weight is not None:
|
481 |
+
self._init_weight(m.weight)
|
482 |
+
if hasattr(m, "bias") and m.bias is not None:
|
483 |
+
self._init_bias(m.bias)
|
484 |
+
elif classname.find("AdaptiveEmbedding") != -1:
|
485 |
+
if hasattr(m, "emb_projs"):
|
486 |
+
for i in range(len(m.emb_projs)):
|
487 |
+
if m.emb_projs[i] is not None:
|
488 |
+
nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std)
|
489 |
+
elif classname.find("Embedding") != -1:
|
490 |
+
if hasattr(m, "weight"):
|
491 |
+
self._init_weight(m.weight)
|
492 |
+
elif classname.find("ProjectedAdaptiveLogSoftmax") != -1:
|
493 |
+
if hasattr(m, "cluster_weight") and m.cluster_weight is not None:
|
494 |
+
self._init_weight(m.cluster_weight)
|
495 |
+
if hasattr(m, "cluster_bias") and m.cluster_bias is not None:
|
496 |
+
self._init_bias(m.cluster_bias)
|
497 |
+
if hasattr(m, "out_projs"):
|
498 |
+
for i in range(len(m.out_projs)):
|
499 |
+
if m.out_projs[i] is not None:
|
500 |
+
nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std)
|
501 |
+
elif classname.find("LayerNorm") != -1:
|
502 |
+
if hasattr(m, "weight"):
|
503 |
+
nn.init.normal_(m.weight, 1.0, self.config.init_std)
|
504 |
+
if hasattr(m, "bias") and m.bias is not None:
|
505 |
+
self._init_bias(m.bias)
|
506 |
+
else:
|
507 |
+
if hasattr(m, "r_emb"):
|
508 |
+
self._init_weight(m.r_emb)
|
509 |
+
if hasattr(m, "r_w_bias"):
|
510 |
+
self._init_weight(m.r_w_bias)
|
511 |
+
if hasattr(m, "r_r_bias"):
|
512 |
+
self._init_weight(m.r_r_bias)
|
513 |
+
if hasattr(m, "r_bias"):
|
514 |
+
self._init_bias(m.r_bias)
|
515 |
+
|
516 |
+
def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, layer: Optional[int] = -1):
|
517 |
+
"""
|
518 |
+
Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size. Take care of tying
|
519 |
+
weights embeddings afterwards if the model class has a *tie_weights()* method.
|
520 |
+
|
521 |
+
Arguments:
|
522 |
+
new_num_tokens: (*optional*) int:
|
523 |
+
New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at
|
524 |
+
the end. Reducing the size will remove vectors from the end. If not provided or None: does nothing and
|
525 |
+
just returns a pointer to the input tokens `torch.nn.Embeddings` Module of the model.
|
526 |
+
layer: (*optional*) int:
|
527 |
+
Layer of the *AdaptiveEmbedding* where the resizing should be done. Per default the last layer will be
|
528 |
+
resized. Be aware that when resizing other than the last layer, you have to ensure that the new
|
529 |
+
token(s) in the tokenizer are at the corresponding position.
|
530 |
+
|
531 |
+
Return: `torch.nn.Embeddings` Pointer to the input tokens Embeddings Module of the model
|
532 |
+
"""
|
533 |
+
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
|
534 |
+
|
535 |
+
if new_num_tokens is None:
|
536 |
+
return self.get_input_embeddings()
|
537 |
+
|
538 |
+
new_num_tokens_layer, layer = self._get_new_num_tokens_layer(new_num_tokens, layer)
|
539 |
+
assert new_num_tokens_layer > 0, "The size of the new embedding layer cannot be 0 or less"
|
540 |
+
model_embeds = base_model._resize_token_embeddings(new_num_tokens_layer, layer)
|
541 |
+
|
542 |
+
# Update base model and current model config
|
543 |
+
self.config.vocab_size = new_num_tokens
|
544 |
+
base_model.vocab_size = new_num_tokens
|
545 |
+
base_model.n_token = new_num_tokens
|
546 |
+
|
547 |
+
new_embedding_shapes = self._get_embedding_shapes()
|
548 |
+
self._resize_cutoffs(new_num_tokens, new_num_tokens_layer, new_embedding_shapes, layer)
|
549 |
+
|
550 |
+
# Tie weights again if needed
|
551 |
+
self.tie_weights()
|
552 |
+
|
553 |
+
return model_embeds
|
554 |
+
|
555 |
+
def _get_new_num_tokens_layer(self, new_num_tokens, layer):
|
556 |
+
embeddings = self.get_input_embeddings()
|
557 |
+
if layer == -1:
|
558 |
+
layer = len(embeddings.emb_layers) - 1
|
559 |
+
assert 0 <= layer <= len(embeddings.emb_layers) - 1
|
560 |
+
|
561 |
+
new_num_tokens_layer = (
|
562 |
+
new_num_tokens
|
563 |
+
- sum([emb.weight.shape[0] for emb in embeddings.emb_layers[:layer]])
|
564 |
+
- sum([emb.weight.shape[0] for emb in embeddings.emb_layers[layer + 1 :]])
|
565 |
+
)
|
566 |
+
return new_num_tokens_layer, layer
|
567 |
+
|
568 |
+
def _get_embedding_shapes(self):
|
569 |
+
embeddings = self.get_input_embeddings()
|
570 |
+
return [emb.weight.shape[0] for emb in embeddings.emb_layers]
|
571 |
+
|
572 |
+
def _resize_token_embeddings(self, new_num_tokens, layer=-1):
|
573 |
+
embeddings = self.get_input_embeddings()
|
574 |
+
if new_num_tokens is None:
|
575 |
+
return embeddings
|
576 |
+
new_embeddings_layer = self._get_resized_embeddings(embeddings.emb_layers[layer], new_num_tokens)
|
577 |
+
embeddings.emb_layers[layer] = new_embeddings_layer
|
578 |
+
|
579 |
+
self.set_input_embeddings(embeddings)
|
580 |
+
|
581 |
+
return self.get_input_embeddings()
|
582 |
+
|
583 |
+
def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer):
|
584 |
+
embeddings = self.get_input_embeddings()
|
585 |
+
|
586 |
+
for i in range(layer, len(embeddings.cutoffs)):
|
587 |
+
embeddings.cutoffs[i] = sum(new_embedding_shapes[: i + 1])
|
588 |
+
|
589 |
+
embeddings.cutoff_ends = [0] + embeddings.cutoffs
|
590 |
+
embeddings.n_token = new_num_tokens
|
591 |
+
|
592 |
+
self.config.cutoffs = embeddings.cutoffs[:-1]
|
593 |
+
|
594 |
+
return embeddings.cutoffs
|
595 |
+
|
596 |
+
|
597 |
+
@dataclass
|
598 |
+
class TransfoXLModelOutput(ModelOutput):
|
599 |
+
"""
|
600 |
+
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
|
601 |
+
|
602 |
+
Args:
|
603 |
+
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
604 |
+
Sequence of hidden-states at the output of the last layer of the model.
|
605 |
+
mems (`List[torch.FloatTensor]` of length `config.n_layers`):
|
606 |
+
Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
|
607 |
+
input) to speed up sequential decoding. The token ids which have their past given to this model should not
|
608 |
+
be passed as input ids as they have already been computed.
|
609 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
610 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
|
611 |
+
shape `(batch_size, sequence_length, hidden_size)`.
|
612 |
+
|
613 |
+
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
|
614 |
+
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
|
615 |
+
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
|
616 |
+
sequence_length)`.
|
617 |
+
|
618 |
+
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
|
619 |
+
heads.
|
620 |
+
"""
|
621 |
+
|
622 |
+
last_hidden_state: torch.FloatTensor
|
623 |
+
mems: List[torch.FloatTensor] = None
|
624 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
625 |
+
attentions: Optional[Tuple[torch.FloatTensor]] = None
|
626 |
+
|
627 |
+
|
628 |
+
@dataclass
|
629 |
+
class TransfoXLSequenceClassifierOutputWithPast(ModelOutput):
|
630 |
+
"""
|
631 |
+
Base class for outputs of sentence classification models.
|
632 |
+
|
633 |
+
Args:
|
634 |
+
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
|
635 |
+
Classification (or regression if config.num_labels==1) loss.
|
636 |
+
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
|
637 |
+
Classification (or regression if config.num_labels==1) scores (before SoftMax).
|
638 |
+
mems (`List[torch.FloatTensor]` of length `config.n_layers`):
|
639 |
+
Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
|
640 |
+
input) to speed up sequential decoding. The token ids which have their past given to this model should not
|
641 |
+
be passed as input ids as they have already been computed.
|
642 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
643 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
|
644 |
+
shape `(batch_size, sequence_length, hidden_size)`.
|
645 |
+
|
646 |
+
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
|
647 |
+
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
|
648 |
+
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
|
649 |
+
sequence_length)`.
|
650 |
+
|
651 |
+
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
|
652 |
+
heads.
|
653 |
+
"""
|
654 |
+
|
655 |
+
loss: Optional[torch.FloatTensor] = None
|
656 |
+
logits: torch.FloatTensor = None
|
657 |
+
mems: List[torch.FloatTensor] = None
|
658 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
659 |
+
attentions: Optional[Tuple[torch.FloatTensor]] = None
|
660 |
+
|
661 |
+
|
662 |
+
@dataclass
|
663 |
+
class TransfoXLLMHeadModelOutput(ModelOutput):
|
664 |
+
"""
|
665 |
+
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
|
666 |
+
|
667 |
+
Args:
|
668 |
+
losses (`torch.FloatTensor` of shape *(batch_size, sequence_length-1)*, *optional*, returned when `labels` is provided):
|
669 |
+
Language modeling losses (not reduced).
|
670 |
+
prediction_scores (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
|
671 |
+
Prediction scores of the language modeling head (scores for each vocabulary token after SoftMax).
|
672 |
+
mems (`List[torch.FloatTensor]` of length `config.n_layers`):
|
673 |
+
Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
|
674 |
+
input) to speed up sequential decoding. The token ids which have their past given to this model should not
|
675 |
+
be passed as input ids as they have already been computed.
|
676 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
677 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
|
678 |
+
shape `(batch_size, sequence_length, hidden_size)`.
|
679 |
+
|
680 |
+
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
|
681 |
+
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
|
682 |
+
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
|
683 |
+
sequence_length)`.
|
684 |
+
|
685 |
+
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
|
686 |
+
heads.
|
687 |
+
loss (`torch.FloatTensor` of shape `()`, *optional*, returned when `labels` is provided)
|
688 |
+
Reduced language modeling loss.
|
689 |
+
"""
|
690 |
+
|
691 |
+
losses: Optional[torch.FloatTensor] = None
|
692 |
+
prediction_scores: torch.FloatTensor = None
|
693 |
+
mems: List[torch.FloatTensor] = None
|
694 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
695 |
+
attentions: Optional[Tuple[torch.FloatTensor]] = None
|
696 |
+
loss: Optional[torch.FloatTensor] = None
|
697 |
+
|
698 |
+
@property
|
699 |
+
def logits(self):
|
700 |
+
# prediction scores are the output of the adaptive softmax, see
|
701 |
+
# the file `modeling_transfo_xl_utilities`. Since the adaptive
|
702 |
+
# softmax returns the log softmax value, `self.prediction_scores`
|
703 |
+
# are strictly speaking not exactly `logits`, but behave the same
|
704 |
+
# way logits do.
|
705 |
+
return self.prediction_scores
|
706 |
+
|
707 |
+
|
708 |
+
TRANSFO_XL_START_DOCSTRING = r"""
|
709 |
+
|
710 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
711 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
712 |
+
etc.)
|
713 |
+
|
714 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
715 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
716 |
+
and behavior.
|
717 |
+
|
718 |
+
Parameters:
|
719 |
+
config ([`TransfoXLConfig`]): Model configuration class with all the parameters of the model.
|
720 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
721 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
722 |
+
"""
|
723 |
+
|
724 |
+
TRANSFO_XL_INPUTS_DOCSTRING = r"""
|
725 |
+
Args:
|
726 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
727 |
+
Indices of input sequence tokens in the vocabulary.
|
728 |
+
|
729 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
730 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
731 |
+
|
732 |
+
[What are input IDs?](../glossary#input-ids)
|
733 |
+
mems (`List[torch.FloatTensor]` of length `config.n_layers`):
|
734 |
+
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
|
735 |
+
`mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
|
736 |
+
given to this model should not be passed as `input_ids` as they have already been computed.
|
737 |
+
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
|
738 |
+
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
|
739 |
+
|
740 |
+
- 1 indicates the head is **not masked**,
|
741 |
+
- 0 indicates the head is **masked**.
|
742 |
+
|
743 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
744 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
745 |
+
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
746 |
+
model's internal embedding lookup matrix.
|
747 |
+
output_attentions (`bool`, *optional*):
|
748 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
749 |
+
tensors for more detail.
|
750 |
+
output_hidden_states (`bool`, *optional*):
|
751 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
752 |
+
more detail.
|
753 |
+
return_dict (`bool`, *optional*):
|
754 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
755 |
+
"""
|
756 |
+
|
757 |
+
|
758 |
+
@add_start_docstrings(
|
759 |
+
"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
|
760 |
+
TRANSFO_XL_START_DOCSTRING,
|
761 |
+
)
|
762 |
+
class TransfoXLModel(TransfoXLPreTrainedModel):
|
763 |
+
def __init__(self, config):
|
764 |
+
super().__init__(config)
|
765 |
+
|
766 |
+
self.n_token = config.vocab_size
|
767 |
+
|
768 |
+
self.d_embed = config.d_embed
|
769 |
+
self.d_model = config.d_model
|
770 |
+
self.n_head = config.n_head
|
771 |
+
self.d_head = config.d_head
|
772 |
+
|
773 |
+
self.word_emb = AdaptiveEmbedding(
|
774 |
+
config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
|
775 |
+
)
|
776 |
+
|
777 |
+
self.drop = nn.Dropout(config.dropout)
|
778 |
+
|
779 |
+
self.n_layer = config.n_layer
|
780 |
+
self.mem_len = config.mem_len
|
781 |
+
self.attn_type = config.attn_type
|
782 |
+
|
783 |
+
if not config.untie_r:
|
784 |
+
self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
|
785 |
+
self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
|
786 |
+
|
787 |
+
self.layers = nn.ModuleList()
|
788 |
+
if config.attn_type == 0: # the default attention
|
789 |
+
for i in range(config.n_layer):
|
790 |
+
self.layers.append(
|
791 |
+
RelPartialLearnableDecoderLayer(
|
792 |
+
config.n_head,
|
793 |
+
config.d_model,
|
794 |
+
config.d_head,
|
795 |
+
config.d_inner,
|
796 |
+
config.dropout,
|
797 |
+
dropatt=config.dropatt,
|
798 |
+
pre_lnorm=config.pre_lnorm,
|
799 |
+
r_w_bias=None if config.untie_r else self.r_w_bias,
|
800 |
+
r_r_bias=None if config.untie_r else self.r_r_bias,
|
801 |
+
layer_norm_epsilon=config.layer_norm_epsilon,
|
802 |
+
)
|
803 |
+
)
|
804 |
+
else: # learnable embeddings and absolute embeddings are not used in our pretrained checkpoints
|
805 |
+
raise NotImplementedError # Removed them to avoid maintaining dead code
|
806 |
+
|
807 |
+
self.same_length = config.same_length
|
808 |
+
self.clamp_len = config.clamp_len
|
809 |
+
|
810 |
+
if self.attn_type == 0: # default attention
|
811 |
+
self.pos_emb = PositionalEmbedding(self.d_model)
|
812 |
+
else: # learnable embeddings and absolute embeddings
|
813 |
+
raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
|
814 |
+
|
815 |
+
# Initialize weights and apply final processing
|
816 |
+
self.post_init()
|
817 |
+
|
818 |
+
def get_input_embeddings(self):
|
819 |
+
return self.word_emb
|
820 |
+
|
821 |
+
def set_input_embeddings(self, new_embeddings):
|
822 |
+
self.word_emb = new_embeddings
|
823 |
+
|
824 |
+
def backward_compatible(self):
|
825 |
+
self.sample_softmax = -1
|
826 |
+
|
827 |
+
def reset_memory_length(self, mem_len):
|
828 |
+
self.mem_len = mem_len
|
829 |
+
|
830 |
+
def _prune_heads(self, heads):
|
831 |
+
logger.info("Head pruning is not implemented for Transformer-XL model")
|
832 |
+
pass
|
833 |
+
|
834 |
+
def init_mems(self, bsz):
|
835 |
+
if self.mem_len > 0:
|
836 |
+
mems = []
|
837 |
+
param = next(self.parameters())
|
838 |
+
for i in range(self.n_layer):
|
839 |
+
empty = torch.zeros(self.mem_len, bsz, self.config.d_model, dtype=param.dtype, device=param.device)
|
840 |
+
mems.append(empty)
|
841 |
+
|
842 |
+
return mems
|
843 |
+
else:
|
844 |
+
return None
|
845 |
+
|
846 |
+
def _update_mems(self, hids, mems, mlen, qlen):
|
847 |
+
# does not deal with None
|
848 |
+
if mems is None:
|
849 |
+
return None
|
850 |
+
|
851 |
+
# mems is not None
|
852 |
+
assert len(hids) == len(mems), "len(hids) != len(mems)"
|
853 |
+
|
854 |
+
# There are `mlen + qlen` steps that can be cached into mems
|
855 |
+
with torch.no_grad():
|
856 |
+
new_mems = []
|
857 |
+
end_idx = mlen + max(0, qlen)
|
858 |
+
beg_idx = max(0, end_idx - self.mem_len)
|
859 |
+
for i in range(len(hids)):
|
860 |
+
cat = torch.cat([mems[i], hids[i]], dim=0)
|
861 |
+
new_mems.append(cat[beg_idx:end_idx].detach())
|
862 |
+
|
863 |
+
return new_mems
|
864 |
+
|
865 |
+
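    # Illustrative sketch (added for exposition; a hypothetical helper, not library API):
    # the sliding window kept by `_update_mems`. Out of the `mlen + qlen` cached steps,
    # only the trailing `mem_len` survive into the new memory.
    @staticmethod
    def _example_memory_window(mlen=4, qlen=3, mem_len=4):
        end_idx = mlen + max(0, qlen)  # 7 positions available in total
        beg_idx = max(0, end_idx - mem_len)  # keep only the last `mem_len` of them
        return beg_idx, end_idx  # -> (3, 7) for the defaults above
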
    @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TransfoXLModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        mems: Optional[List[torch.FloatTensor]] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TransfoXLModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
        # so we transpose here from shape [bsz, len] to shape [len, bsz]
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_ids = input_ids.transpose(0, 1).contiguous()
            qlen, bsz = input_ids.size()
        elif inputs_embeds is not None:
            inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
            qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if mems is None:
            mems = self.init_mems(bsz)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
        # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
                head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
            head_mask = head_mask.to(
                dtype=next(self.parameters()).dtype
            )  # switch to float if need + fp16 compatibility
        else:
            head_mask = [None] * self.n_layer

        if inputs_embeds is not None:
            word_emb = inputs_embeds
        else:
            word_emb = self.word_emb(input_ids)

        mlen = mems[0].size(0) if mems is not None else 0
        klen = mlen + qlen
        if self.same_length:
            all_ones = word_emb.new_ones((qlen, klen), dtype=torch.bool)
            mask_len = klen - self.mem_len
            if mask_len > 0:
                mask_shift_len = qlen - mask_len
            else:
                mask_shift_len = qlen
            dec_attn_mask = (torch.triu(all_ones, 1 + mlen) + torch.tril(all_ones, -mask_shift_len))[:, :, None]  # -1
        else:
            dec_attn_mask = torch.triu(word_emb.new_ones((qlen, klen), dtype=torch.bool), diagonal=1 + mlen)[
                :, :, None
            ]

        hids = []
        attentions = [] if output_attentions else None
        if self.attn_type == 0:  # default
            # build the position sequence in int64, then cast to the embedding dtype for fp16 compatibility
            pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device, dtype=torch.int64).to(word_emb.dtype)
            if self.clamp_len > 0:
                pos_seq.clamp_(max=self.clamp_len)
            pos_emb = self.pos_emb(pos_seq)

            core_out = self.drop(word_emb)
            pos_emb = self.drop(pos_emb)

            for i, layer in enumerate(self.layers):
                hids.append(core_out)
                mems_i = None if mems is None else mems[i]
                layer_outputs = layer(
                    core_out,
                    pos_emb,
                    dec_attn_mask=dec_attn_mask,
                    mems=mems_i,
                    head_mask=head_mask[i],
                    output_attentions=output_attentions,
                )
                core_out = layer_outputs[0]
                if output_attentions:
                    attentions.append(layer_outputs[1])
        else:  # learnable embeddings and absolute embeddings
            raise NotImplementedError  # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint

        core_out = self.drop(core_out)

        new_mems = self._update_mems(hids, mems, mlen, qlen)

        if output_hidden_states:
            # Add last layer and transpose to library standard shape [bsz, len, hidden_dim]
            hids.append(core_out)
            hids = tuple(t.transpose(0, 1).contiguous() for t in hids)
        else:
            hids = None
        if output_attentions:
            # Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len]
            attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
        # We transpose back here to shape [bsz, len, hidden_dim]
        core_out = core_out.transpose(0, 1).contiguous()

        if not return_dict:
            return tuple(v for v in [core_out, new_mems, hids, attentions] if v is not None)

        return TransfoXLModelOutput(
            last_hidden_state=core_out,
            mems=new_mems,
            hidden_states=hids,
            attentions=attentions,
        )

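# Usage sketch (added for exposition; the checkpoint name and tokenizer calls are
# assumptions, not taken from this file): chaining `mems` across calls extends the
# effective context without re-encoding earlier tokens.
def _example_chained_forward():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("transfo-xl/transfo-xl-wt103")
    model = TransfoXLModel.from_pretrained("transfo-xl/transfo-xl-wt103")

    first = tokenizer("The quick brown fox", return_tensors="pt")
    out_1 = model(input_ids=first.input_ids)
    # Only the new tokens are passed as `input_ids`; the cache covers the rest.
    second = tokenizer(" jumps over the lazy dog", return_tensors="pt")
    out_2 = model(input_ids=second.input_ids, mems=out_1.mems)
    return out_2.last_hidden_state
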
@add_start_docstrings(
    """
    The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive
    input embeddings)
    """,
    TRANSFO_XL_START_DOCSTRING,
)
class TransfoXLLMHeadModel(TransfoXLPreTrainedModel):
    _tied_weights_keys = [r"crit\.out_projs\.\d+", r"crit\.out_layers\.\d+\.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.transformer = TransfoXLModel(config)
        self.sample_softmax = config.sample_softmax
        self.trainer_compatible = getattr(config, "trainer_compatible", False)

        if not self.trainer_compatible:
            warnings.warn(
                "The output of TransfoXL will be updated in v5 to support a single loss as first argument. In order "
                "to use that updated output, please specify `trainer_compatible=True` as your configuration"
                " attribute.",
                DeprecationWarning,
            )

        assert self.sample_softmax <= 0, (
            "Sampling from the softmax is not implemented yet. Please look at issue: #3310:"
            " https://github.com/huggingface/transformers/issues/3310"
        )

        self.crit = ProjectedAdaptiveLogSoftmax(
            config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
        )

        # Initialize weights and apply final processing
        self.post_init()

    def tie_weights(self):
        """
        Run this to be sure output and input (adaptive) softmax weights are tied
        """

        if self.config.tie_word_embeddings:
            for i in range(len(self.crit.out_layers)):
                self._tie_or_clone_weights(self.crit.out_layers[i], self.transformer.word_emb.emb_layers[i])
        if self.config.tie_projs:
            for i, tie_proj in enumerate(self.config.tie_projs):
                if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
                    if self.config.torchscript:
                        self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[0].clone())
                    else:
                        self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
                elif tie_proj and self.config.div_val != 1:
                    if self.config.torchscript:
                        self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[i].clone())
                    else:
                        self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]

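    # Illustrative note (added for exposition): with `div_val == 1` there is a single shared
    # embedding projection, so every tied output projection points at `emb_projs[0]`; with
    # `div_val != 1` each vocabulary cluster has its own projection, so cluster `i` ties to
    # `emb_projs[i]`. Under torchscript the projections are cloned, since scripted modules
    # cannot share the same parameter across submodules.
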
    def reset_memory_length(self, mem_len):
        self.transformer.reset_memory_length(mem_len)

    def init_mems(self, bsz):
        return self.transformer.init_mems(bsz)

    @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TransfoXLLMHeadModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        mems: Optional[List[torch.FloatTensor]] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TransfoXLLMHeadModelOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to
            `-100` are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None:
            bsz, tgt_len = input_ids.size(0), input_ids.size(1)
        elif inputs_embeds is not None:
            bsz, tgt_len = inputs_embeds.size(0), inputs_embeds.size(1)
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        transformer_outputs = self.transformer(
            input_ids,
            mems=mems,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden = transformer_outputs[0]
        pred_hid = last_hidden[:, -tgt_len:]

        if labels is not None:
            # Prevents all labels being -100 and throwing an error
            # when backwarding the loss
            miss_valid_label = labels[0, 1:].sum() == (labels.size(1) - 1) * -100
            if miss_valid_label:
                # Sets an <EOS> token, just to prevent loss from being NaN
                labels[0, 1] = self.config.eos_token_id

        softmax_output = self.crit(pred_hid, labels)
        prediction_scores = softmax_output.view(bsz, tgt_len, -1) if labels is None else ()

        if labels is not None:
            losses = softmax_output.view(bsz, tgt_len - 1)
            # Avoids incorporating padding (-100) tokens into the loss value
            loss = losses[losses != 0].mean()
        else:
            losses, loss = None, None

        if not return_dict:
            if self.trainer_compatible:
                output = (prediction_scores, losses) if losses is not None else (prediction_scores,)
                output += transformer_outputs[1:]
                return ((loss,) + output) if loss is not None else output
            else:
                output = (prediction_scores, *transformer_outputs[1:])
                output = ((losses,) + output) if losses is not None else output
                return (output + (loss,)) if loss is not None else output

        return TransfoXLLMHeadModelOutput(
            loss=loss,
            prediction_scores=prediction_scores,
            losses=losses,
            mems=transformer_outputs.mems,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    def get_output_embeddings(self):
        """Double-check if you are using adaptive softmax."""
        if self.sample_softmax > 0:
            return self.out_layer
        else:
            return self.crit.out_layers[-1]

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **model_kwargs):
        inputs = {}

        # if past is defined in model kwargs then use it for faster decoding
        if past_key_values:
            inputs["mems"] = past_key_values
            inputs["input_ids"] = input_ids[:, -1].unsqueeze(-1)
        else:
            inputs["input_ids"] = input_ids

        return inputs

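    # Illustrative note (added for exposition): during generation, once `mems` exist only
    # the newest token needs a forward pass; everything older is already cached, so the
    # method above truncates `input_ids` to its last column and forwards the cache as `mems`.
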
    def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer):
        new_cutoffs = super()._resize_cutoffs(new_num_tokens, new_emb_size, new_embedding_shapes, layer)

        self.crit.cutoffs = new_cutoffs
        self.crit.cutoff_ends = [0] + new_cutoffs
        self.crit.n_token = new_num_tokens

    @staticmethod
    def _reorder_cache(mems: List[torch.Tensor], beam_idx: torch.Tensor) -> List[torch.Tensor]:
        """
        This function is used to re-order the `mems` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `mems` with the correct beam_idx at every
        generation step.
        """
        return [layer_past.index_select(1, beam_idx.to(layer_past.device)) for layer_past in mems]

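# Illustrative sketch (added for exposition; hypothetical helper, not part of the file):
# each entry of `mems` is laid out as (mem_len, batch * num_beams, d_model), so beam
# re-ordering above selects along dim 1 rather than dim 0 as for usual `past_key_values`.
def _example_reorder_mems():
    mems = [torch.arange(24, dtype=torch.float).reshape(2, 4, 3)]  # one layer, 4 beams
    beam_idx = torch.tensor([1, 0, 3, 2])  # swap neighbouring beams
    reordered = TransfoXLLMHeadModel._reorder_cache(mems, beam_idx)
    return reordered[0].shape  # torch.Size([2, 4, 3])
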
@add_start_docstrings(
    """
    The Transformer-XL Model transformer with a sequence classification head on top (linear layer).

    [`TransfoXLForSequenceClassification`] uses the last token in order to do the classification, as other causal
    models (e.g. GPT-1) do.

    Since it does classification on the last token, it needs to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    TRANSFO_XL_START_DOCSTRING,
)
class TransfoXLForSequenceClassification(TransfoXLPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = TransfoXLModel(config)
        self.score = nn.Linear(config.d_embed, self.num_labels, bias=False)
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TransfoXLSequenceClassifierOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        mems: Optional[List[torch.FloatTensor]] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TransfoXLSequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            mems=mems,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size, sequence_length = input_ids.shape[:2]
        else:
            batch_size, sequence_length = inputs_embeds.shape[:2]

        assert (
            self.config.pad_token_id is not None or batch_size == 1
        ), "Cannot handle batch sizes > 1 if no padding token is defined."
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
                sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
                sequence_lengths = sequence_lengths % input_ids.shape[-1]
                sequence_lengths = sequence_lengths.to(logits.device)
            else:
                sequence_lengths = -1
                logger.warning(
                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                    "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
                )

        # gather one score vector per row: the one at the last real (non-padding) position
        pooled_logits = logits[range(batch_size), sequence_lengths]

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TransfoXLSequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            mems=transformer_outputs.mems,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

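# Illustrative sketch (added for exposition; hypothetical helper, not part of the file):
# the last-token selection used by the classification head above. The first pad index
# minus one is the last real token; when a row contains no pad, `argmax` returns 0 and
# the modulo wraps the resulting -1 to the final position (reverse indexing is avoided
# for ONNX compatibility).
def _example_last_token_index():
    pad_token_id = 0
    input_ids = torch.tensor([[5, 6, 7, 0], [5, 6, 7, 8]])
    lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
    return lengths % input_ids.shape[-1]  # tensor([2, 3])
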
llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__init__.py
ADDED
@@ -0,0 +1,82 @@
# Copyright 2023 The HuggingFace and Baidu Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available


_import_structure = {
    "configuration_ernie_m": ["ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieMConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_ernie_m"] = ["ErnieMTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie_m"] = [
        "ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieMForMultipleChoice",
        "ErnieMForQuestionAnswering",
        "ErnieMForSequenceClassification",
        "ErnieMForTokenClassification",
        "ErnieMModel",
        "ErnieMPreTrainedModel",
        "ErnieMForInformationExtraction",
    ]


if TYPE_CHECKING:
    from .configuration_ernie_m import ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_ernie_m import ErnieMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie_m import (
            ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieMForInformationExtraction,
            ErnieMForMultipleChoice,
            ErnieMForQuestionAnswering,
            ErnieMForSequenceClassification,
            ErnieMForTokenClassification,
            ErnieMModel,
            ErnieMPreTrainedModel,
        )


else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

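# Illustrative note (added for exposition): thanks to `_LazyModule`, an import such as
# `from transformers.models.ernie_m import ErnieMModel` only loads `modeling_ernie_m`
# (and therefore torch) on first attribute access, keeping the top-level import cheap.
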
llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.35 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/configuration_ernie_m.cpython-310.pyc
ADDED
Binary file (5.25 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/modeling_ernie_m.cpython-310.pyc
ADDED
Binary file (29.5 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/tokenization_ernie_m.cpython-310.pyc
ADDED
Binary file (14.1 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/configuration_ernie_m.py
ADDED
@@ -0,0 +1,112 @@
# coding=utf-8
# Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ErnieM model configuration"""
# Adapted from the original paddlenlp repository
# (https://github.com/PaddlePaddle/PaddleNLP/blob/develop/paddlenlp/transformers/ernie_m/configuration.py)

from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig
from ..deprecated._archive_maps import ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class ErnieMConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ErnieMModel`]. It is used to instantiate an
    Ernie-M model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the `Ernie-M`
    [susnato/ernie-m-base_pytorch](https://huggingface.co/susnato/ernie-m-base_pytorch) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 250002):
            Vocabulary size of `inputs_ids` in [`ErnieMModel`]. Also is the vocab size of token embedding matrix.
            Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling
            [`ErnieMModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the embedding layer, encoder layers and pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the feed-forward (ff) layer in the encoder. Input tensors to feed-forward layers are
            firstly projected from hidden_size to intermediate_size, and then projected back to hidden_size. Typically
            intermediate_size is larger than hidden_size.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function in the feed-forward layer. `"gelu"`, `"relu"` and any other torch
            supported activation functions are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings and encoder.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability used in `MultiHeadAttention` in all encoder layers to drop some attention target.
        max_position_embeddings (`int`, *optional*, defaults to 514):
            The maximum value of the dimensionality of position encoding, which dictates the maximum supported length
            of an input sequence.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the normal initializer for initializing all weight matrices.
        pad_token_id (`int`, *optional*, defaults to 1):
            The index of the padding token in the token vocabulary.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.
        act_dropout (`float`, *optional*, defaults to 0.0):
            This dropout probability is used in `ErnieMEncoderLayer` after activation.

    A normal_initializer initializes weight matrices as normal distributions. See
    `ErnieMPretrainedModel._init_weights()` for how weights are initialized in `ErnieMModel`.
    """

    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.act_dropout = act_dropout

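# Usage sketch (added for exposition; hypothetical helper, not part of the file):
# instantiating the default configuration and overriding a couple of fields. The
# `attribute_map` above makes `num_classes` resolve to `num_labels`.
def _example_ernie_m_config():
    config = ErnieMConfig(num_hidden_layers=6, hidden_dropout_prob=0.2)
    assert config.num_classes == config.num_labels
    return config
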
llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/modeling_ernie_m.py
ADDED
@@ -0,0 +1,1058 @@
# coding=utf-8
# Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch ErnieM model."""


import math
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn, tensor
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    MultipleChoiceModelOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_ernie_m import ErnieMConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "susnato/ernie-m-base_pytorch"
_CONFIG_FOR_DOC = "ErnieMConfig"
_TOKENIZER_FOR_DOC = "ErnieMTokenizer"


from ..deprecated._archive_maps import ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


# Adapted from paddlenlp.transformers.ernie_m.modeling.ErnieEmbeddings
class ErnieMEmbeddings(nn.Module):
    """Construct the embeddings from word and position embeddings."""

    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=config.pad_token_id
        )
        self.layer_norm = nn.LayerNorm(normalized_shape=config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
        self.padding_idx = config.pad_token_id

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.LongTensor] = None,
        past_key_values_length: int = 0,
    ) -> torch.Tensor:
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        if position_ids is None:
            input_shape = inputs_embeds.size()[:-1]
            ones = torch.ones(input_shape, dtype=torch.int64, device=inputs_embeds.device)
            seq_length = torch.cumsum(ones, dim=1)
            position_ids = seq_length - ones

            if past_key_values_length > 0:
                position_ids = position_ids + past_key_values_length
        # to mimic paddlenlp implementation
        position_ids += 2
        position_embeddings = self.position_embeddings(position_ids)
        embeddings = inputs_embeds + position_embeddings
        embeddings = self.layer_norm(embeddings)
        embeddings = self.dropout(embeddings)

        return embeddings

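# Illustrative sketch (added for exposition; hypothetical helper, not part of the file):
# how ErnieMEmbeddings derives position ids when none are supplied. Positions start at 0
# per row, are shifted by any cached length, then offset by 2 to mirror the paddlenlp
# convention (the first two position slots are reserved).
def _example_position_ids(batch=1, seq_len=5, past_key_values_length=0):
    ones = torch.ones(batch, seq_len, dtype=torch.int64)
    position_ids = torch.cumsum(ones, dim=1) - ones + past_key_values_length
    return position_ids + 2  # e.g. tensor([[2, 3, 4, 5, 6]])
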
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ErnieM,self.value->self.v_proj,self.key->self.k_proj,self.query->self.q_proj
class ErnieMSelfAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.q_proj = nn.Linear(config.hidden_size, self.all_head_size)
        self.k_proj = nn.Linear(config.hidden_size, self.all_head_size)
        self.v_proj = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

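    # Illustrative note (added for exposition): `transpose_for_scores` reshapes
    # (batch, seq_len, all_head_size) into (batch, num_heads, seq_len, head_size),
    # so the per-head attention scores can be computed with a single batched matmul.
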
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        mixed_query_layer = self.q_proj(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.k_proj(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.v_proj(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.k_proj(hidden_states))
            value_layer = self.transpose_for_scores(self.v_proj(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.k_proj(hidden_states))
            value_layer = self.transpose_for_scores(self.v_proj(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        use_cache = past_key_value is not None
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            query_length, key_length = query_layer.shape[2], key_layer.shape[2]
            if use_cache:
                position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
                    -1, 1
                )
            else:
                position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r

            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in ErnieMModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs


class ErnieMAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        self.self_attn = ErnieMSelfAttention(config, position_embedding_type=position_embedding_type)
        self.out_proj = nn.Linear(config.hidden_size, config.hidden_size)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self_attn.num_attention_heads, self.self_attn.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self_attn.q_proj = prune_linear_layer(self.self_attn.q_proj, index)
        self.self_attn.k_proj = prune_linear_layer(self.self_attn.k_proj, index)
        self.self_attn.v_proj = prune_linear_layer(self.self_attn.v_proj, index)
        self.out_proj = prune_linear_layer(self.out_proj, index, dim=1)

        # Update hyper params and store pruned heads
        self.self_attn.num_attention_heads = self.self_attn.num_attention_heads - len(heads)
        self.self_attn.all_head_size = self.self_attn.attention_head_size * self.self_attn.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self_attn(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.out_proj(self_outputs[0])
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs

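# Usage sketch (added for exposition; hypothetical helper, not part of the file): head
# pruning shrinks the q/k/v projections in place, after which the layer runs with fewer
# attention heads.
def _example_prune_heads(config: ErnieMConfig) -> int:
    attention = ErnieMAttention(config)
    attention.prune_heads([0, 1])  # drop the first two heads of this layer
    return attention.self_attn.num_attention_heads  # config.num_attention_heads - 2
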
class ErnieMEncoderLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        # to mimic paddlenlp implementation
        dropout = 0.1 if config.hidden_dropout_prob is None else config.hidden_dropout_prob
        act_dropout = config.hidden_dropout_prob if config.act_dropout is None else config.act_dropout

        self.self_attn = ErnieMAttention(config)
        self.linear1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.dropout = nn.Dropout(act_dropout)
        self.linear2 = nn.Linear(config.intermediate_size, config.hidden_size)
        self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        if isinstance(config.hidden_act, str):
            self.activation = ACT2FN[config.hidden_act]
        else:
            self.activation = config.hidden_act

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = True,
    ):
        residual = hidden_states
        if output_attentions:
            hidden_states, attention_opt_weights = self.self_attn(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                head_mask=head_mask,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
            )

        else:
            hidden_states = self.self_attn(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                head_mask=head_mask,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
            )
        hidden_states = residual + self.dropout1(hidden_states)
        hidden_states = self.norm1(hidden_states)
        residual = hidden_states

        hidden_states = self.linear1(hidden_states)
        hidden_states = self.activation(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.linear2(hidden_states)
        hidden_states = residual + self.dropout2(hidden_states)
        hidden_states = self.norm2(hidden_states)

        if output_attentions:
            return hidden_states, attention_opt_weights
        else:
            return hidden_states


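# --- Editor's illustrative sketch, not part of the original file: the layer above is
# post-norm (attention -> residual add -> LayerNorm, then FFN -> residual add ->
# LayerNorm). All shapes and config values below are made up.
if __name__ == "__main__":
    config = ErnieMConfig(hidden_size=64, num_attention_heads=4, intermediate_size=128)
    layer = ErnieMEncoderLayer(config)
    x = torch.randn(2, 5, 64)  # (batch, seq_len, hidden)
    out, attn_weights = layer(x)  # output_attentions defaults to True in this layer
    assert out.shape == x.shape
    assert attn_weights.shape == (2, 4, 5, 5)  # (batch, heads, seq, seq)

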
class ErnieMEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([ErnieMEncoderLayer(config) for _ in range(config.num_hidden_layers)])

    def forward(
        self,
        input_embeds: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        hidden_states = () if output_hidden_states else None
        attentions = () if output_attentions else None

        output = input_embeds
        if output_hidden_states:
            hidden_states = hidden_states + (output,)
        for i, layer in enumerate(self.layers):
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            output, opt_attn_weights = layer(
                hidden_states=output,
                attention_mask=attention_mask,
                head_mask=layer_head_mask,
                past_key_value=past_key_value,
            )

            if output_hidden_states:
                hidden_states = hidden_states + (output,)
            if output_attentions:
                attentions = attentions + (opt_attn_weights,)

        last_hidden_state = output
        if not return_dict:
            return tuple(v for v in [last_hidden_state, hidden_states, attentions] if v is not None)

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=last_hidden_state, hidden_states=hidden_states, attentions=attentions
        )


# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->ErnieM
class ErnieMPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class ErnieMPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ErnieMConfig
    base_model_prefix = "ernie_m"

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


ERNIE_M_START_DOCSTRING = r"""

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
    behavior.

    Parameters:
        config ([`ErnieMConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

ERNIE_M_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`ErnieMTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ErnieM Model transformer outputting raw hidden-states without any specific head on top.",
    ERNIE_M_START_DOCSTRING,
)
class ErnieMModel(ErnieMPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        super(ErnieMModel, self).__init__(config)
        self.initializer_range = config.initializer_range
        self.embeddings = ErnieMEmbeddings(config)
        self.encoder = ErnieMEncoder(config)
        self.pooler = ErnieMPooler(config) if add_pooling_layer else None
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layers[layer].self_attn.prune_heads(heads)

    @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPastAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[tensor] = None,
        position_ids: Optional[tensor] = None,
        attention_mask: Optional[tensor] = None,
        head_mask: Optional[tensor] = None,
        inputs_embeds: Optional[tensor] = None,
        past_key_values: Optional[Tuple[Tuple[tensor]]] = None,
        use_cache: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time.")

        # init the default bool values
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        past_key_values_length = 0
        if past_key_values is not None:
            past_key_values_length = past_key_values[0][0].shape[2]

        # Adapted from paddlenlp.transformers.ernie_m.ErnieMModel
        if attention_mask is None:
            attention_mask = (input_ids == self.config.pad_token_id).to(torch.float32)
            attention_mask *= torch.finfo(attention_mask.dtype).min
            if past_key_values is not None:
                batch_size = past_key_values[0][0].shape[0]
                past_mask = torch.zeros([batch_size, 1, 1, past_key_values_length], dtype=attention_mask.dtype)
                attention_mask = torch.concat([past_mask, attention_mask], dim=-1)
        # For 2D attention_mask from tokenizer
        elif attention_mask.ndim == 2:
            attention_mask = attention_mask.to(torch.float32)
            attention_mask = 1.0 - attention_mask
            attention_mask *= torch.finfo(attention_mask.dtype).min

        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(1)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if not return_dict:
            sequence_output = encoder_outputs[0]
            pooler_output = self.pooler(sequence_output) if self.pooler is not None else None
            return (sequence_output, pooler_output) + encoder_outputs[1:]

        sequence_output = encoder_outputs["last_hidden_state"]
        pooler_output = self.pooler(sequence_output) if self.pooler is not None else None
        hidden_states = None if not output_hidden_states else encoder_outputs["hidden_states"]
        attentions = None if not output_attentions else encoder_outputs["attentions"]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooler_output,
            hidden_states=hidden_states,
            attentions=attentions,
        )


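# --- Editor's illustrative sketch, not part of the original file: running the bare
# model on random token ids with a tiny random-weight config instead of a downloaded
# checkpoint. All sizes here are made up.
if __name__ == "__main__":
    config = ErnieMConfig(
        vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128
    )
    model = ErnieMModel(config)
    input_ids = torch.randint(0, 1000, (2, 7))
    out = model(input_ids, attention_mask=torch.ones(2, 7))  # 2D mask path above
    assert out.last_hidden_state.shape == (2, 7, 64)
    assert out.pooler_output.shape == (2, 64)  # tanh-activated first-token pooling

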
@add_start_docstrings(
    """ErnieM Model transformer with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks.""",
    ERNIE_M_START_DOCSTRING,
)
class ErnieMForSequenceClassification(ErnieMPreTrainedModel):
    # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification.__init__ with Bert->ErnieM,bert->ernie_m
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.ernie_m = ErnieMModel(config)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.Tensor]] = None,
        use_cache: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = True,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple[torch.FloatTensor], SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.ernie_m(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


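# --- Editor's illustrative sketch, not part of the original file: the single-label
# classification loss path. Long-dtype labels with num_labels > 1 select cross-entropy
# in the problem_type dispatch above. Config values and labels are made up.
if __name__ == "__main__":
    config = ErnieMConfig(
        vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=4,
        intermediate_size=128, num_labels=3,
    )
    clf = ErnieMForSequenceClassification(config)
    input_ids = torch.randint(0, 1000, (4, 9))
    out = clf(input_ids, labels=torch.tensor([0, 2, 1, 1]))
    out.loss.backward()  # cross-entropy over the pooled [CLS] representation

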
@add_start_docstrings(
    """ErnieM Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RocStories/SWAG tasks.""",
    ERNIE_M_START_DOCSTRING,
)
class ErnieMForMultipleChoice(ErnieMPreTrainedModel):
    # Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice.__init__ with Bert->ErnieM,bert->ernie_m
    def __init__(self, config):
        super().__init__(config)

        self.ernie_m = ErnieMModel(config)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.FloatTensor], MultipleChoiceModelOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.ernie_m(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


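# --- Editor's illustrative sketch, not part of the original file: multiple choice
# takes (batch, num_choices, seq_len), flattens to (batch * num_choices, seq_len) for
# the encoder, then reshapes the per-choice scores back. Sizes are made up.
if __name__ == "__main__":
    config = ErnieMConfig(
        vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128
    )
    mc = ErnieMForMultipleChoice(config)
    input_ids = torch.randint(0, 1000, (2, 4, 9))  # 2 examples, 4 choices each
    out = mc(input_ids, labels=torch.tensor([1, 3]))
    assert out.logits.shape == (2, 4)  # one logit per choice

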
@add_start_docstrings(
    """ErnieM Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.""",
    ERNIE_M_START_DOCSTRING,
)
class ErnieMForTokenClassification(ErnieMPreTrainedModel):
    # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification.__init__ with Bert->ErnieM,bert->ernie_m
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.ernie_m = ErnieMModel(config, add_pooling_layer=False)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.Tensor]] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = True,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple[torch.FloatTensor], TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.ernie_m(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


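# --- Editor's illustrative sketch, not part of the original file: unlike the sequence
# classifier, the token classifier skips pooling and scores every position. Sizes are
# made up.
if __name__ == "__main__":
    config = ErnieMConfig(
        vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=4,
        intermediate_size=128, num_labels=5,
    )
    tagger = ErnieMForTokenClassification(config)
    out = tagger(torch.randint(0, 1000, (2, 9)))
    assert out.logits.shape == (2, 9, 5)  # one label distribution per token

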
@add_start_docstrings(
    """ErnieM Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).""",
    ERNIE_M_START_DOCSTRING,
)
class ErnieMForQuestionAnswering(ErnieMPreTrainedModel):
    # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering.__init__ with Bert->ErnieM,bert->ernie_m
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.ernie_m = ErnieMModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        processor_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.FloatTensor], QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.ernie_m(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, DataParallel scattering may add an extra dimension; squeeze it
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


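# --- Editor's illustrative sketch, not part of the original file: turning the start
# and end logits above into a greedy answer span. Sizes are made up; a real pipeline
# would also enforce start <= end and skip special tokens.
if __name__ == "__main__":
    config = ErnieMConfig(
        vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=4,
        intermediate_size=128, num_labels=2,
    )
    qa = ErnieMForQuestionAnswering(config)
    out = qa(torch.randint(0, 1000, (1, 12)))
    start = int(out.start_logits.argmax(-1))  # most likely span start token
    end = int(out.end_logits.argmax(-1))  # most likely span end token

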
@add_start_docstrings(
    """ErnieMForInformationExtraction is an Ernie-M Model with two linear layers on top of the hidden-states output to
    compute `start_prob` and `end_prob`, designed for Universal Information Extraction.""",
    ERNIE_M_START_DOCSTRING,
)
# Copied from paddlenlp.transformers.ernie_m.modeling.UIEM
class ErnieMForInformationExtraction(ErnieMPreTrainedModel):
    def __init__(self, config):
        super(ErnieMForInformationExtraction, self).__init__(config)
        self.ernie_m = ErnieMModel(config)
        self.linear_start = nn.Linear(config.hidden_size, 1)
        self.linear_end = nn.Linear(config.hidden_size, 1)
        self.sigmoid = nn.Sigmoid()
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.FloatTensor], QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for position (index) for computing the start_positions loss. Positions outside of the sequence are
            not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for position (index) for computing the end_positions loss. Positions outside of the sequence are
            not taken into account for computing the loss.
        """

        result = self.ernie_m(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if return_dict:
            sequence_output = result.last_hidden_state
        else:
            sequence_output = result[0]

        start_logits = self.linear_start(sequence_output)
        start_logits = start_logits.squeeze(-1)
        end_logits = self.linear_end(sequence_output)
        end_logits = end_logits.squeeze(-1)

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, DataParallel scattering may add an extra dimension; squeeze it
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = BCEWithLogitsLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            return tuple(
                i
                for i in [total_loss, start_logits, end_logits, result.hidden_states, result.attentions]
                if i is not None
            )

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=result.hidden_states,
            attentions=result.attentions,
        )


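# --- Editor's illustrative sketch, not part of the original file: UIE-style extraction
# scores every token independently with a sigmoid (hence BCEWithLogitsLoss above),
# rather than a softmax over positions. Sizes and the 0.5 threshold are made up.
if __name__ == "__main__":
    config = ErnieMConfig(
        vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128
    )
    uie = ErnieMForInformationExtraction(config)
    out = uie(torch.randint(0, 1000, (1, 12)))
    start_prob = torch.sigmoid(out.start_logits)  # per-token start probability
    end_prob = torch.sigmoid(out.end_logits)  # per-token end probability
    spans = [
        (s, e)
        for s in range(12)
        for e in range(s, 12)
        if start_prob[0, s] > 0.5 and end_prob[0, e] > 0.5
    ]
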
llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/tokenization_ernie_m.py
ADDED
@@ -0,0 +1,405 @@
# coding=utf-8
# Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for Ernie-M."""

import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}


# Adapted from paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
class ErnieMTokenizer(PreTrainedTokenizer):
    r"""
    Constructs an Ernie-M tokenizer. It uses the `sentencepiece` tools to cut words into sub-words.

    Args:
        sentencepiece_model_file (`str`):
            The file path of the sentencepiece model.
        vocab_file (`str`, *optional*):
            The file path of the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `False`):
            Whether or not to lowercase the input when tokenizing.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            A special token representing the `unknown (out-of-vocabulary)` token. An out-of-vocabulary token is set
            to `unk_token` in order to be converted to an ID.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            A special token separating two different sentences in the same input.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            A special token used to make arrays of tokens the same size for batching purposes.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            A special token used for sequence classification. It is the first token of the sequence when built with
            special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            A special token representing a masked token. This is the token used in the masked language modeling task,
            which the model tries to predict in place of the original unmasked ones.
    """

    # Ernie-M model doesn't have token_type embedding.
    model_input_names: List[str] = ["input_ids"]

    vocab_files_names = VOCAB_FILES_NAMES
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize a string."""

        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        """
        Converts a sequence of ids into a single string by first converting them to tokens.
        """
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        r"""
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. An ErnieM sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
        Returns:
            `List[int]`: List of input IDs with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        r"""
        Build an offset map from a pair of offset maps by concatenating and adding the offsets of special tokens. An
        Ernie-M offset_mapping has the following format:

        - single sequence: `(0,0) X (0,0)`
        - pair of sequences: `(0,0) A (0,0) (0,0) B (0,0)`

        Args:
            offset_mapping_0 (`List[tuple]`):
                List of char offsets to which the special tokens will be added.
            offset_mapping_1 (`List[tuple]`, *optional*):
                Optional second list of wordpiece offsets for offset mapping pairs.
        Returns:
            `List[tuple]`: List of wordpiece offsets with the appropriate offsets of special tokens.
        """
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        r"""
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `encode` method.

        Args:
            token_ids_0 (`List[int]`):
                List of ids of the first sequence.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.
        Returns:
            `List[int]`:
                The list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create the token type IDs corresponding to the sequences passed. [What are token type
        IDs?](../glossary#token-type-ids) Should be overridden in a subclass if the model has a special way of
        building those.

        Args:
            token_ids_0 (`List[int]`):
                The first tokenized sequence.
            token_ids_1 (`List[int]`, *optional*):
                The second tokenized sequence.
        Returns:
            `List[int]`: The token type ids.
        """
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        """
        Checks whether `char` is a Chinese (CJK Unified Ideographs) character.
        """
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        """
        Checks whether `char` is an ASCII alphabetic character.
        """
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        """
        Checks whether `char` is a punctuation character handled by the tokenizer.
        """
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        """
        Checks whether `char` is a whitespace character.
        """
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)

        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1

        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)

        return (vocab_file,)

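# --- Editor's illustrative sketch, not part of the original file: the special-token
# layout produced by `build_inputs_with_special_tokens` above. The checkpoint name
# "susnato/ernie-m-base_pytorch" is an assumption (any ERNIE-M checkpoint should work),
# and loading it requires network access.
if __name__ == "__main__":
    tok = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
    ids = tok.build_inputs_with_special_tokens([11, 12], [21, 22])
    # Pair layout: [CLS] 11 12 [SEP] [SEP] 21 22 [SEP]
    assert ids[0] == tok.cls_token_id
    assert ids.count(tok.sep_token_id) == 3
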
llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__init__.py
ADDED
@@ -0,0 +1,107 @@
1 |
+
# Copyright 2021 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
from typing import TYPE_CHECKING
|
15 |
+
|
16 |
+
from ...utils import (
|
17 |
+
OptionalDependencyNotAvailable,
|
18 |
+
_LazyModule,
|
19 |
+
is_sentencepiece_available,
|
20 |
+
is_tokenizers_available,
|
21 |
+
is_torch_available,
|
22 |
+
)
|
23 |
+
|
24 |
+
|
25 |
+
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
|
26 |
+
|
27 |
+
try:
|
28 |
+
if not is_sentencepiece_available():
|
29 |
+
raise OptionalDependencyNotAvailable()
|
30 |
+
except OptionalDependencyNotAvailable:
|
31 |
+
pass
|
32 |
+
else:
|
33 |
+
_import_structure["tokenization_fnet"] = ["FNetTokenizer"]
|
34 |
+
|
35 |
+
try:
|
36 |
+
if not is_tokenizers_available():
|
37 |
+
raise OptionalDependencyNotAvailable()
|
38 |
+
except OptionalDependencyNotAvailable:
|
39 |
+
pass
|
40 |
+
else:
|
41 |
+
_import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
|
42 |
+
|
43 |
+
try:
|
44 |
+
if not is_torch_available():
|
45 |
+
raise OptionalDependencyNotAvailable()
|
46 |
+
except OptionalDependencyNotAvailable:
|
47 |
+
pass
|
48 |
+
else:
|
49 |
+
_import_structure["modeling_fnet"] = [
|
50 |
+
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
|
51 |
+
"FNetForMaskedLM",
|
52 |
+
"FNetForMultipleChoice",
|
53 |
+
"FNetForNextSentencePrediction",
|
54 |
+
"FNetForPreTraining",
|
55 |
+
"FNetForQuestionAnswering",
|
56 |
+
"FNetForSequenceClassification",
|
57 |
+
"FNetForTokenClassification",
|
58 |
+
"FNetLayer",
|
59 |
+
"FNetModel",
|
60 |
+
"FNetPreTrainedModel",
|
61 |
+
]
|
62 |
+
|
63 |
+
|
64 |
+
if TYPE_CHECKING:
|
65 |
+
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
|
66 |
+
|
67 |
+
try:
|
68 |
+
if not is_sentencepiece_available():
|
69 |
+
raise OptionalDependencyNotAvailable()
|
70 |
+
except OptionalDependencyNotAvailable:
|
71 |
+
pass
|
72 |
+
else:
|
73 |
+
from .tokenization_fnet import FNetTokenizer
|
74 |
+
|
75 |
+
try:
|
76 |
+
if not is_tokenizers_available():
|
77 |
+
raise OptionalDependencyNotAvailable()
|
78 |
+
except OptionalDependencyNotAvailable:
|
79 |
+
pass
|
80 |
+
else:
|
81 |
+
from .tokenization_fnet_fast import FNetTokenizerFast
|
82 |
+
|
83 |
+
try:
|
84 |
+
if not is_torch_available():
|
85 |
+
raise OptionalDependencyNotAvailable()
|
86 |
+
except OptionalDependencyNotAvailable:
|
87 |
+
pass
|
88 |
+
else:
|
89 |
+
from .modeling_fnet import (
|
90 |
+
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
|
91 |
+
FNetForMaskedLM,
|
92 |
+
FNetForMultipleChoice,
|
93 |
+
FNetForNextSentencePrediction,
|
94 |
+
FNetForPreTraining,
|
95 |
+
FNetForQuestionAnswering,
|
96 |
+
FNetForSequenceClassification,
|
97 |
+
FNetForTokenClassification,
|
98 |
+
FNetLayer,
|
99 |
+
FNetModel,
|
100 |
+
FNetPreTrainedModel,
|
101 |
+
)
|
102 |
+
|
103 |
+
|
104 |
+
else:
|
105 |
+
import sys
|
106 |
+
|
107 |
+
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
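This `__init__.py` is the library's standard lazy-import shim: `_import_structure` maps each submodule to its public names, every optional backend (sentencepiece, tokenizers, torch) is probed via `OptionalDependencyNotAvailable`, and at import time the module object is replaced by a `_LazyModule`, so a submodule is only actually imported when one of its attributes is first accessed. A minimal sketch of the observable behavior, assuming torch and the tokenizer backends are installed:

```python
import transformers.models.fnet as fnet

# At this point `fnet` is a _LazyModule proxy; modeling_fnet (and its
# torch-heavy code paths) have not been imported yet.
config_cls = fnet.FNetConfig  # first access triggers the configuration_fnet import
model_cls = fnet.FNetModel    # this access pulls in modeling_fnet
print(config_cls.model_type)  # "fnet"
```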
llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/convert_fnet_original_flax_checkpoint_to_pytorch.cpython-310.pyc
ADDED
Binary file (3.87 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/modeling_fnet.cpython-310.pyc
ADDED
Binary file (35.9 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/tokenization_fnet.cpython-310.pyc
ADDED
Binary file (12.5 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/tokenization_fnet_fast.cpython-310.pyc
ADDED
Binary file (7.01 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/configuration_fnet.py
ADDED
@@ -0,0 +1,119 @@
+# coding=utf-8
+# Copyright 2021 Google AI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" FNet model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+class FNetConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`FNetModel`]. It is used to instantiate an FNet
+    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+    defaults will yield a similar configuration to that of the FNet
+    [google/fnet-base](https://huggingface.co/google/fnet-base) architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 32000):
+            Vocabulary size of the FNet model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`FNetModel`] or [`TFFNetModel`].
+        hidden_size (`int`, *optional*, defaults to 768):
+            Dimension of the encoder layers and the pooler layer.
+        num_hidden_layers (`int`, *optional*, defaults to 12):
+            Number of hidden layers in the Transformer encoder.
+        intermediate_size (`int`, *optional*, defaults to 3072):
+            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`):
+            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+            `"relu"`, `"selu"` and `"gelu_new"` are supported.
+        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+        max_position_embeddings (`int`, *optional*, defaults to 512):
+            The maximum sequence length that this model might ever be used with. Typically set this to something large
+            just in case (e.g., 512 or 1024 or 2048).
+        type_vocab_size (`int`, *optional*, defaults to 4):
+            The vocabulary size of the `token_type_ids` passed when calling [`FNetModel`] or [`TFFNetModel`].
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+            The epsilon used by the layer normalization layers.
+        use_tpu_fourier_optimizations (`bool`, *optional*, defaults to `False`):
+            Determines whether to use TPU-optimized FFTs. If `True`, the model will favor axis-wise FFT transforms.
+            Set to `False` for GPU/CPU hardware, in which case n-dimensional FFTs are used.
+        tpu_short_seq_length (`int`, *optional*, defaults to 512):
+            The sequence length that is expected by the model when using TPUs. This will be used to initialize the DFT
+            matrix only when *use_tpu_fourier_optimizations* is set to `True` and the input sequence is shorter than or
+            equal to 4096 tokens.
+
+    Example:
+
+    ```python
+    >>> from transformers import FNetConfig, FNetModel
+
+    >>> # Initializing a FNet fnet-base style configuration
+    >>> configuration = FNetConfig()
+
+    >>> # Initializing a model (with random weights) from the fnet-base style configuration
+    >>> model = FNetModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "fnet"
+
+    def __init__(
+        self,
+        vocab_size=32000,
+        hidden_size=768,
+        num_hidden_layers=12,
+        intermediate_size=3072,
+        hidden_act="gelu_new",
+        hidden_dropout_prob=0.1,
+        max_position_embeddings=512,
+        type_vocab_size=4,
+        initializer_range=0.02,
+        layer_norm_eps=1e-12,
+        use_tpu_fourier_optimizations=False,
+        tpu_short_seq_length=512,
+        pad_token_id=3,
+        bos_token_id=1,
+        eos_token_id=2,
+        **kwargs,
+    ):
+        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.num_hidden_layers = num_hidden_layers
+        self.intermediate_size = intermediate_size
+        self.hidden_act = hidden_act
+        self.hidden_dropout_prob = hidden_dropout_prob
+        self.initializer_range = initializer_range
+        self.type_vocab_size = type_vocab_size
+        self.layer_norm_eps = layer_norm_eps
+        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
+        self.tpu_short_seq_length = tpu_short_seq_length
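Beyond the standard BERT-style hyperparameters, the two TPU knobs (`use_tpu_fourier_optimizations` and `tpu_short_seq_length`) are the only FNet-specific arguments. They are plain constructor arguments stored on the config; a minimal sketch with hypothetical values:

```python
from transformers import FNetConfig

# Hypothetical settings for illustration: favor the TPU-optimized, axis-wise
# FFT path and pre-build the DFT matrix for 256-token sequences.
config = FNetConfig(use_tpu_fourier_optimizations=True, tpu_short_seq_length=256)
print(config.use_tpu_fourier_optimizations, config.tpu_short_seq_length)
```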
llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/convert_fnet_original_flax_checkpoint_to_pytorch.py
ADDED
@@ -0,0 +1,157 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert FNet checkpoint."""
+
+
+import argparse
+
+import torch
+from flax.training.checkpoints import restore_checkpoint
+
+from transformers import FNetConfig, FNetForPreTraining
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+
+
+def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, fnet_config_file, save_path):
+    # Initialise PyTorch model
+    config = FNetConfig.from_json_file(fnet_config_file)
+    print(f"Building PyTorch model from configuration: {config}")
+    fnet_pretraining_model = FNetForPreTraining(config)
+
+    checkpoint_dict = restore_checkpoint(flax_checkpoint_path, None)
+    pretrained_model_params = checkpoint_dict["target"]
+
+    # Embeddings
+    # Position IDs
+    state_dict = fnet_pretraining_model.state_dict()
+
+    position_ids = state_dict["fnet.embeddings.position_ids"]
+    new_state_dict = {"fnet.embeddings.position_ids": position_ids}
+    # Embedding Layers
+    new_state_dict["fnet.embeddings.word_embeddings.weight"] = torch.tensor(
+        pretrained_model_params["encoder"]["embedder"]["word"]["embedding"]
+    )
+    new_state_dict["fnet.embeddings.position_embeddings.weight"] = torch.tensor(
+        pretrained_model_params["encoder"]["embedder"]["position"]["embedding"][0]
+    )
+    new_state_dict["fnet.embeddings.token_type_embeddings.weight"] = torch.tensor(
+        pretrained_model_params["encoder"]["embedder"]["type"]["embedding"]
+    )
+    new_state_dict["fnet.embeddings.projection.weight"] = torch.tensor(
+        pretrained_model_params["encoder"]["embedder"]["hidden_mapping_in"]["kernel"]
+    ).T
+    new_state_dict["fnet.embeddings.projection.bias"] = torch.tensor(
+        pretrained_model_params["encoder"]["embedder"]["hidden_mapping_in"]["bias"]
+    )
+    new_state_dict["fnet.embeddings.LayerNorm.weight"] = torch.tensor(
+        pretrained_model_params["encoder"]["embedder"]["layer_norm"]["scale"]
+    )
+    new_state_dict["fnet.embeddings.LayerNorm.bias"] = torch.tensor(
+        pretrained_model_params["encoder"]["embedder"]["layer_norm"]["bias"]
+    )
+
+    # Encoder Layers
+    for layer in range(config.num_hidden_layers):
+        new_state_dict[f"fnet.encoder.layer.{layer}.fourier.output.LayerNorm.weight"] = torch.tensor(
+            pretrained_model_params["encoder"][f"encoder_{layer}"]["mixing_layer_norm"]["scale"]
+        )
+        new_state_dict[f"fnet.encoder.layer.{layer}.fourier.output.LayerNorm.bias"] = torch.tensor(
+            pretrained_model_params["encoder"][f"encoder_{layer}"]["mixing_layer_norm"]["bias"]
+        )
+
+        new_state_dict[f"fnet.encoder.layer.{layer}.intermediate.dense.weight"] = torch.tensor(
+            pretrained_model_params["encoder"][f"feed_forward_{layer}"]["intermediate"]["kernel"]
+        ).T
+        new_state_dict[f"fnet.encoder.layer.{layer}.intermediate.dense.bias"] = torch.tensor(
+            pretrained_model_params["encoder"][f"feed_forward_{layer}"]["intermediate"]["bias"]
+        )
+
+        new_state_dict[f"fnet.encoder.layer.{layer}.output.dense.weight"] = torch.tensor(
+            pretrained_model_params["encoder"][f"feed_forward_{layer}"]["output"]["kernel"]
+        ).T
+        new_state_dict[f"fnet.encoder.layer.{layer}.output.dense.bias"] = torch.tensor(
+            pretrained_model_params["encoder"][f"feed_forward_{layer}"]["output"]["bias"]
+        )
+
+        new_state_dict[f"fnet.encoder.layer.{layer}.output.LayerNorm.weight"] = torch.tensor(
+            pretrained_model_params["encoder"][f"encoder_{layer}"]["output_layer_norm"]["scale"]
+        )
+        new_state_dict[f"fnet.encoder.layer.{layer}.output.LayerNorm.bias"] = torch.tensor(
+            pretrained_model_params["encoder"][f"encoder_{layer}"]["output_layer_norm"]["bias"]
+        )
+
+    # Pooler Layers
+    new_state_dict["fnet.pooler.dense.weight"] = torch.tensor(pretrained_model_params["encoder"]["pooler"]["kernel"]).T
+    new_state_dict["fnet.pooler.dense.bias"] = torch.tensor(pretrained_model_params["encoder"]["pooler"]["bias"])
+
+    # Masked LM Layers
+    new_state_dict["cls.predictions.transform.dense.weight"] = torch.tensor(
+        pretrained_model_params["predictions_dense"]["kernel"]
+    ).T
+    new_state_dict["cls.predictions.transform.dense.bias"] = torch.tensor(
+        pretrained_model_params["predictions_dense"]["bias"]
+    )
+    new_state_dict["cls.predictions.transform.LayerNorm.weight"] = torch.tensor(
+        pretrained_model_params["predictions_layer_norm"]["scale"]
+    )
+    new_state_dict["cls.predictions.transform.LayerNorm.bias"] = torch.tensor(
+        pretrained_model_params["predictions_layer_norm"]["bias"]
+    )
+    new_state_dict["cls.predictions.decoder.weight"] = torch.tensor(
+        pretrained_model_params["encoder"]["embedder"]["word"]["embedding"]
+    )
+    new_state_dict["cls.predictions.decoder.bias"] = torch.tensor(
+        pretrained_model_params["predictions_output"]["output_bias"]
+    )
+    new_state_dict["cls.predictions.bias"] = torch.tensor(pretrained_model_params["predictions_output"]["output_bias"])
+
+    # Seq Relationship Layers
+    new_state_dict["cls.seq_relationship.weight"] = torch.tensor(
+        pretrained_model_params["classification"]["output_kernel"]
+    )
+    new_state_dict["cls.seq_relationship.bias"] = torch.tensor(
+        pretrained_model_params["classification"]["output_bias"]
+    )
+
+    # Load State Dict
+    fnet_pretraining_model.load_state_dict(new_state_dict)
+
+    # Save PreTrained
+    print(f"Saving pretrained model to {save_path}")
+    fnet_pretraining_model.save_pretrained(save_path)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    # Required parameters
+    parser.add_argument(
+        "--flax_checkpoint_path", default=None, type=str, required=True, help="Path to the Flax checkpoint."
+    )
+    parser.add_argument(
+        "--fnet_config_file",
+        default=None,
+        type=str,
+        required=True,
+        help=(
+            "The config json file corresponding to the pre-trained FNet model. \n"
+            "This specifies the model architecture."
+        ),
+    )
+    parser.add_argument("--save_path", default=None, type=str, required=True, help="Path to the output model.")
+    args = parser.parse_args()
+    convert_flax_checkpoint_to_pytorch(args.flax_checkpoint_path, args.fnet_config_file, args.save_path)
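Note the `.T` on every Flax `kernel`: Flax dense layers store weights as `(in_features, out_features)` while PyTorch `nn.Linear.weight` is `(out_features, in_features)`, so each kernel is transposed on the way in; embedding tables are copied without transposition. The script is normally run from the command line with the three required flags (`--flax_checkpoint_path`, `--fnet_config_file`, `--save_path`), but the conversion function can also be called directly; a minimal sketch with placeholder paths:

```python
from convert_fnet_original_flax_checkpoint_to_pytorch import convert_flax_checkpoint_to_pytorch

# Placeholder paths for illustration; the config JSON must describe the same
# architecture (layer count, hidden size, etc.) as the Flax checkpoint being ported.
convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path="/path/to/flax_checkpoint",
    fnet_config_file="/path/to/fnet_config.json",
    save_path="/path/to/pytorch_dump",
)
```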