Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes.
- llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/__init__.py +89 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/configuration_ctrl.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/modeling_ctrl.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/modeling_tf_ctrl.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/tokenization_ctrl.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/configuration_ctrl.py +116 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/modeling_ctrl.py +841 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/modeling_tf_ctrl.py +931 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/tokenization_ctrl.py +249 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/__pycache__/_archive_maps.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/bort/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/bort/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/bort/__pycache__/convert_bort_original_gluonnlp_checkpoint_to_pytorch.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/bort/convert_bort_original_gluonnlp_checkpoint_to_pytorch.py +319 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__init__.py +95 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/configuration_open_llama.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/modeling_open_llama.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/configuration_open_llama.py +170 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/modeling_open_llama.py +969 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__init__.py +73 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/configuration_retribert.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/modeling_retribert.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert_fast.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/configuration_retribert.py +107 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/modeling_retribert.py +218 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert.py +517 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert_fast.py +180 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__init__.py +97 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/configuration_transfo_xl.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/convert_transfo_xl_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl_utilities.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl_utilities.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/tokenization_transfo_xl.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py +252 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py +819 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/van/__init__.py +54 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/configuration_van.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/convert_van_to_pytorch.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/modeling_van.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/van/configuration_van.py +110 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/van/convert_van_to_pytorch.py +291 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/__init__.py
ADDED
@@ -0,0 +1,89 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
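
The `__init__.py` above wires CTRL into the `_LazyModule` machinery: the config and tokenizer entries in `_import_structure` are always importable, while the torch- and TF-backed modeling modules are only resolved when one of their symbols is first accessed. A minimal usage sketch (not part of the uploaded diff; it assumes a regular `transformers` install with PyTorch available, and the small sizes are illustrative only):

# Lightweight entries: importing these does not require the torch backend.
from transformers import CTRLConfig, CTRLTokenizer

config = CTRLConfig(n_layer=2, n_head=4, n_embd=64, dff=128)  # small, illustrative sizes
print(config.model_type)  # -> "ctrl"

# Accessing a modeling class resolves the lazy "modeling_ctrl" entry (and imports torch).
from transformers import CTRLModel

model = CTRLModel(config)  # randomly initialized weights, nothing downloaded
print(sum(p.numel() for p in model.parameters()))
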
llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.36 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/configuration_ctrl.cpython-310.pyc
ADDED
Binary file (4.23 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/modeling_ctrl.cpython-310.pyc
ADDED
Binary file (26.7 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/modeling_tf_ctrl.cpython-310.pyc
ADDED
Binary file (29 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/__pycache__/tokenization_ctrl.cpython-310.pyc
ADDED
Binary file (7.53 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/configuration_ctrl.py
ADDED
@@ -0,0 +1,116 @@
# coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Salesforce CTRL configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class CTRLConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`CTRLModel`] or a [`TFCTRLModel`]. It is used to
    instantiate a CTRL model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the
    [Salesforce/ctrl](https://huggingface.co/Salesforce/ctrl) architecture from SalesForce.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 246534):
            Vocabulary size of the CTRL model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`CTRLModel`] or [`TFCTRLModel`].
        n_positions (`int`, *optional*, defaults to 256):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        n_embd (`int`, *optional*, defaults to 1280):
            Dimensionality of the embeddings and hidden states.
        dff (`int`, *optional*, defaults to 8192):
            Dimensionality of the inner dimension of the feed forward networks (FFN).
        n_layer (`int`, *optional*, defaults to 48):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        resid_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`int`, *optional*, defaults to 0.1):
            The dropout ratio for the embeddings.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-06):
            The epsilon to use in the layer normalization layers
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).


    Examples:

    ```python
    >>> from transformers import CTRLConfig, CTRLModel

    >>> # Initializing a CTRL configuration
    >>> configuration = CTRLConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = CTRLModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range

        self.use_cache = use_cache

        super().__init__(**kwargs)
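
The `attribute_map` in the configuration above aliases the canonical `PretrainedConfig` attribute names onto CTRL's historical hyperparameter names, so generic code can keep using `hidden_size`, `num_hidden_layers`, etc. A small sketch of that behaviour (assuming `transformers` is installed; the overridden sizes are illustrative only):

from transformers import CTRLConfig

# A tiny config for experiments; unspecified arguments keep the Salesforce/ctrl defaults.
config = CTRLConfig(n_layer=2, n_head=4, n_embd=128, dff=512)

# Canonical names resolve to the CTRL-specific attributes through `attribute_map`.
assert config.hidden_size == config.n_embd == 128
assert config.num_hidden_layers == config.n_layer == 2
assert config.num_attention_heads == config.n_head == 4
assert config.max_position_embeddings == config.n_positions == 256  # default kept
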
llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/modeling_ctrl.py
ADDED
@@ -0,0 +1,841 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2018 Salesforce and HuggingFace Inc. team.
|
3 |
+
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
4 |
+
#
|
5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
+
# you may not use this file except in compliance with the License.
|
7 |
+
# You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing, software
|
12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
+
# See the License for the specific language governing permissions and
|
15 |
+
# limitations under the License.
|
16 |
+
""" PyTorch CTRL model."""
|
17 |
+
|
18 |
+
from typing import Optional, Tuple, Union
|
19 |
+
|
20 |
+
import numpy as np
|
21 |
+
import torch
|
22 |
+
from torch import nn
|
23 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
24 |
+
|
25 |
+
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutput
|
26 |
+
from ...modeling_utils import PreTrainedModel
|
27 |
+
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_linear_layer
|
28 |
+
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
|
29 |
+
from .configuration_ctrl import CTRLConfig
|
30 |
+
|
31 |
+
|
32 |
+
logger = logging.get_logger(__name__)
|
33 |
+
|
34 |
+
_CONFIG_FOR_DOC = "CTRLConfig"
|
35 |
+
|
36 |
+
|
37 |
+
from ..deprecated._archive_maps import CTRL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
|
38 |
+
|
39 |
+
|
40 |
+
def angle_defn(pos, i, d_model_size):
|
41 |
+
angle_rates = 1 / torch.pow(10000, (2 * (i // 2)) / d_model_size)
|
42 |
+
return pos * angle_rates
|
43 |
+
|
44 |
+
|
45 |
+
def positional_encoding(position, d_model_size, dtype):
|
46 |
+
# create the sinusoidal pattern for the positional encoding
|
47 |
+
angle_rads = angle_defn(
|
48 |
+
torch.arange(position, dtype=torch.int64).to(dtype).unsqueeze(1),
|
49 |
+
torch.arange(d_model_size, dtype=torch.int64).to(dtype).unsqueeze(0),
|
50 |
+
d_model_size,
|
51 |
+
)
|
52 |
+
|
53 |
+
sines = torch.sin(angle_rads[:, 0::2])
|
54 |
+
cosines = torch.cos(angle_rads[:, 1::2])
|
55 |
+
|
56 |
+
pos_encoding = torch.cat([sines, cosines], dim=-1)
|
57 |
+
return pos_encoding
|
58 |
+
|
59 |
+
|
60 |
+
def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):
|
61 |
+
# calculate attention
|
62 |
+
matmul_qk = torch.matmul(q, k.permute(0, 1, 3, 2))
|
63 |
+
|
64 |
+
dk = k.shape[-1]
|
65 |
+
scaled_attention_logits = matmul_qk / np.sqrt(dk)
|
66 |
+
|
67 |
+
if mask is not None:
|
68 |
+
nd, ns = scaled_attention_logits.size(-2), scaled_attention_logits.size(-1)
|
69 |
+
scaled_attention_logits += mask[ns - nd : ns, :ns] * -1e4
|
70 |
+
|
71 |
+
if attention_mask is not None:
|
72 |
+
# Apply the attention mask
|
73 |
+
scaled_attention_logits = scaled_attention_logits + attention_mask
|
74 |
+
|
75 |
+
attention_weights = torch.softmax(scaled_attention_logits, dim=-1)
|
76 |
+
|
77 |
+
# Mask heads if we want to
|
78 |
+
if head_mask is not None:
|
79 |
+
attention_weights = attention_weights * head_mask
|
80 |
+
|
81 |
+
output = torch.matmul(attention_weights, v)
|
82 |
+
|
83 |
+
return output, attention_weights
|
84 |
+
|
85 |
+
|
86 |
+
class MultiHeadAttention(nn.Module):
|
87 |
+
def __init__(self, d_model_size, num_heads):
|
88 |
+
super().__init__()
|
89 |
+
self.num_heads = num_heads
|
90 |
+
self.d_model_size = d_model_size
|
91 |
+
|
92 |
+
self.depth = int(d_model_size / self.num_heads)
|
93 |
+
|
94 |
+
self.Wq = nn.Linear(d_model_size, d_model_size)
|
95 |
+
self.Wk = nn.Linear(d_model_size, d_model_size)
|
96 |
+
self.Wv = nn.Linear(d_model_size, d_model_size)
|
97 |
+
|
98 |
+
self.dense = nn.Linear(d_model_size, d_model_size)
|
99 |
+
self.pruned_heads = set()
|
100 |
+
|
101 |
+
def prune_heads(self, heads):
|
102 |
+
attention_head_size = self.d_model_size // self.num_heads
|
103 |
+
if len(heads) == 0:
|
104 |
+
return
|
105 |
+
heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, attention_head_size, self.pruned_heads)
|
106 |
+
|
107 |
+
# Prune linear layers
|
108 |
+
self.Wq = prune_linear_layer(self.Wq, index)
|
109 |
+
self.Wk = prune_linear_layer(self.Wk, index)
|
110 |
+
self.Wv = prune_linear_layer(self.Wv, index)
|
111 |
+
self.dense = prune_linear_layer(self.dense, index, dim=1)
|
112 |
+
|
113 |
+
# Update hyper params
|
114 |
+
self.num_heads = self.num_heads - len(heads)
|
115 |
+
self.d_model_size = attention_head_size * self.num_heads
|
116 |
+
self.pruned_heads = self.pruned_heads.union(heads)
|
117 |
+
|
118 |
+
def split_into_heads(self, x, batch_size):
|
119 |
+
x = x.reshape(batch_size, -1, self.num_heads, self.depth)
|
120 |
+
return x.permute([0, 2, 1, 3])
|
121 |
+
|
122 |
+
def forward(
|
123 |
+
self,
|
124 |
+
v,
|
125 |
+
k,
|
126 |
+
q,
|
127 |
+
mask,
|
128 |
+
layer_past=None,
|
129 |
+
attention_mask=None,
|
130 |
+
head_mask=None,
|
131 |
+
use_cache=False,
|
132 |
+
output_attentions=False,
|
133 |
+
):
|
134 |
+
batch_size = q.shape[0]
|
135 |
+
|
136 |
+
q = self.Wq(q)
|
137 |
+
k = self.Wk(k)
|
138 |
+
v = self.Wv(v)
|
139 |
+
|
140 |
+
q = self.split_into_heads(q, batch_size)
|
141 |
+
k = self.split_into_heads(k, batch_size)
|
142 |
+
v = self.split_into_heads(v, batch_size)
|
143 |
+
if layer_past is not None:
|
144 |
+
past_key, past_value = layer_past[0], layer_past[1]
|
145 |
+
k = torch.cat((past_key, k), dim=-2)
|
146 |
+
v = torch.cat((past_value, v), dim=-2)
|
147 |
+
|
148 |
+
if use_cache is True:
|
149 |
+
present = torch.stack((k, v))
|
150 |
+
else:
|
151 |
+
present = (None,)
|
152 |
+
|
153 |
+
output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask)
|
154 |
+
scaled_attention = output[0].permute([0, 2, 1, 3])
|
155 |
+
attn = output[1]
|
156 |
+
original_size_attention = scaled_attention.reshape(batch_size, -1, self.d_model_size)
|
157 |
+
output = self.dense(original_size_attention)
|
158 |
+
|
159 |
+
outputs = (output, present)
|
160 |
+
if output_attentions:
|
161 |
+
outputs = outputs + (attn,)
|
162 |
+
return outputs
|
163 |
+
|
164 |
+
|
165 |
+
def point_wise_feed_forward_network(d_model_size, dff):
|
166 |
+
return nn.Sequential(nn.Linear(d_model_size, dff), nn.ReLU(), nn.Linear(dff, d_model_size))
|
167 |
+
|
168 |
+
|
169 |
+
class EncoderLayer(nn.Module):
|
170 |
+
def __init__(self, d_model_size, num_heads, dff, rate=0.1):
|
171 |
+
super().__init__()
|
172 |
+
|
173 |
+
self.multi_head_attention = MultiHeadAttention(d_model_size, num_heads)
|
174 |
+
self.ffn = point_wise_feed_forward_network(d_model_size, dff)
|
175 |
+
|
176 |
+
self.layernorm1 = nn.LayerNorm(d_model_size, eps=1e-6)
|
177 |
+
self.layernorm2 = nn.LayerNorm(d_model_size, eps=1e-6)
|
178 |
+
|
179 |
+
self.dropout1 = nn.Dropout(rate)
|
180 |
+
self.dropout2 = nn.Dropout(rate)
|
181 |
+
|
182 |
+
def forward(
|
183 |
+
self, x, mask, layer_past=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False
|
184 |
+
):
|
185 |
+
normed = self.layernorm1(x)
|
186 |
+
attn_outputs = self.multi_head_attention(
|
187 |
+
normed,
|
188 |
+
normed,
|
189 |
+
normed,
|
190 |
+
mask,
|
191 |
+
layer_past=layer_past,
|
192 |
+
attention_mask=attention_mask,
|
193 |
+
head_mask=head_mask,
|
194 |
+
use_cache=use_cache,
|
195 |
+
output_attentions=output_attentions,
|
196 |
+
)
|
197 |
+
attn_output = attn_outputs[0]
|
198 |
+
attn_output = self.dropout1(attn_output)
|
199 |
+
out1 = x + attn_output
|
200 |
+
|
201 |
+
out2 = self.layernorm2(out1)
|
202 |
+
ffn_output = self.ffn(out2)
|
203 |
+
ffn_output = self.dropout2(ffn_output)
|
204 |
+
out2 = out1 + ffn_output
|
205 |
+
|
206 |
+
outputs = (out2,) + attn_outputs[1:]
|
207 |
+
return outputs
|
208 |
+
|
209 |
+
|
210 |
+
class CTRLPreTrainedModel(PreTrainedModel):
|
211 |
+
"""
|
212 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
213 |
+
models.
|
214 |
+
"""
|
215 |
+
|
216 |
+
config_class = CTRLConfig
|
217 |
+
base_model_prefix = "transformer"
|
218 |
+
|
219 |
+
def _init_weights(self, module):
|
220 |
+
"""Initialize the weights."""
|
221 |
+
if isinstance(module, (nn.Linear, Conv1D)):
|
222 |
+
# Slightly different from the TF version which uses truncated_normal for initialization
|
223 |
+
# cf https://github.com/pytorch/pytorch/pull/5617
|
224 |
+
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
225 |
+
if module.bias is not None:
|
226 |
+
module.bias.data.zero_()
|
227 |
+
elif isinstance(module, nn.Embedding):
|
228 |
+
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
229 |
+
if module.padding_idx is not None:
|
230 |
+
module.weight.data[module.padding_idx].zero_()
|
231 |
+
elif isinstance(module, nn.LayerNorm):
|
232 |
+
module.bias.data.zero_()
|
233 |
+
module.weight.data.fill_(1.0)
|
234 |
+
|
235 |
+
|
236 |
+
CTRL_START_DOCSTRING = r"""
|
237 |
+
|
238 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
239 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
240 |
+
etc.)
|
241 |
+
|
242 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
243 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
244 |
+
and behavior.
|
245 |
+
|
246 |
+
Parameters:
|
247 |
+
config ([`CTRLConfig`]): Model configuration class with all the parameters of the model.
|
248 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
249 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
250 |
+
"""
|
251 |
+
|
252 |
+
CTRL_INPUTS_DOCSTRING = r"""
|
253 |
+
Args:
|
254 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
255 |
+
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0].shape[-2]`
|
256 |
+
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
|
257 |
+
|
258 |
+
If `past_key_values` is used, only input IDs that do not have their past calculated should be passed as
|
259 |
+
`input_ids`.
|
260 |
+
|
261 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
|
262 |
+
[`PreTrainedTokenizer.encode`] for details.
|
263 |
+
|
264 |
+
[What are input IDs?](../glossary#input-ids)
|
265 |
+
past_key_values (`Tuple[Tuple[torch.FloatTensor]]` of length `config.n_layers`):
|
266 |
+
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
|
267 |
+
`past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
|
268 |
+
their past given to this model should not be passed as input ids as they have already been computed.
|
269 |
+
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
270 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
271 |
+
|
272 |
+
- 1 for tokens that are **not masked**,
|
273 |
+
- 0 for tokens that are **masked**.
|
274 |
+
|
275 |
+
[What are attention masks?](../glossary#attention-mask)
|
276 |
+
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
277 |
+
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
|
278 |
+
1]`:
|
279 |
+
|
280 |
+
- 0 corresponds to a *sentence A* token,
|
281 |
+
- 1 corresponds to a *sentence B* token.
|
282 |
+
|
283 |
+
[What are token type IDs?](../glossary#token-type-ids)
|
284 |
+
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
285 |
+
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
286 |
+
config.max_position_embeddings - 1]`.
|
287 |
+
|
288 |
+
[What are position IDs?](../glossary#position-ids)
|
289 |
+
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
|
290 |
+
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
|
291 |
+
|
292 |
+
- 1 indicates the head is **not masked**,
|
293 |
+
- 0 indicates the head is **masked**.
|
294 |
+
|
295 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
296 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
297 |
+
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
298 |
+
model's internal embedding lookup matrix.
|
299 |
+
use_cache (`bool`, *optional*):
|
300 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
301 |
+
`past_key_values`).
|
302 |
+
output_attentions (`bool`, *optional*):
|
303 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
304 |
+
tensors for more detail.
|
305 |
+
output_hidden_states (`bool`, *optional*):
|
306 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
307 |
+
more detail.
|
308 |
+
return_dict (`bool`, *optional*):
|
309 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
310 |
+
"""
|
311 |
+
|
312 |
+
|
313 |
+
@add_start_docstrings(
|
314 |
+
"The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.",
|
315 |
+
CTRL_START_DOCSTRING,
|
316 |
+
)
|
317 |
+
class CTRLModel(CTRLPreTrainedModel):
|
318 |
+
def __init__(self, config):
|
319 |
+
super().__init__(config)
|
320 |
+
|
321 |
+
self.d_model_size = config.n_embd
|
322 |
+
self.num_layers = config.n_layer
|
323 |
+
|
324 |
+
self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size, torch.float)
|
325 |
+
|
326 |
+
self.w = nn.Embedding(config.vocab_size, config.n_embd)
|
327 |
+
|
328 |
+
self.dropout = nn.Dropout(config.embd_pdrop)
|
329 |
+
self.h = nn.ModuleList(
|
330 |
+
[EncoderLayer(config.n_embd, config.n_head, config.dff, config.resid_pdrop) for _ in range(config.n_layer)]
|
331 |
+
)
|
332 |
+
self.layernorm = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
|
333 |
+
|
334 |
+
# Initialize weights and apply final processing
|
335 |
+
self.post_init()
|
336 |
+
|
337 |
+
def get_input_embeddings(self):
|
338 |
+
return self.w
|
339 |
+
|
340 |
+
def set_input_embeddings(self, new_embeddings):
|
341 |
+
self.w = new_embeddings
|
342 |
+
|
343 |
+
def _prune_heads(self, heads_to_prune):
|
344 |
+
"""
|
345 |
+
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
|
346 |
+
"""
|
347 |
+
for layer, heads in heads_to_prune.items():
|
348 |
+
self.h[layer].multi_head_attention.prune_heads(heads)
|
349 |
+
|
350 |
+
@add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
|
351 |
+
@replace_return_docstrings(output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC)
|
352 |
+
def forward(
|
353 |
+
self,
|
354 |
+
input_ids: Optional[torch.LongTensor] = None,
|
355 |
+
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
356 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
357 |
+
token_type_ids: Optional[torch.LongTensor] = None,
|
358 |
+
position_ids: Optional[torch.LongTensor] = None,
|
359 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
360 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
361 |
+
use_cache: Optional[bool] = None,
|
362 |
+
output_attentions: Optional[bool] = None,
|
363 |
+
output_hidden_states: Optional[bool] = None,
|
364 |
+
return_dict: Optional[bool] = None,
|
365 |
+
) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPast]:
|
366 |
+
r"""
|
367 |
+
Returns:
|
368 |
+
|
369 |
+
Example:
|
370 |
+
|
371 |
+
```python
|
372 |
+
>>> from transformers import AutoTokenizer, CTRLModel
|
373 |
+
>>> import torch
|
374 |
+
|
375 |
+
>>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
|
376 |
+
>>> model = CTRLModel.from_pretrained("Salesforce/ctrl")
|
377 |
+
|
378 |
+
>>> # CTRL was trained with control codes as the first token
|
379 |
+
>>> inputs = tokenizer("Opinion My dog is cute", return_tensors="pt")
|
380 |
+
>>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values()
|
381 |
+
|
382 |
+
>>> outputs = model(**inputs)
|
383 |
+
|
384 |
+
>>> last_hidden_states = outputs.last_hidden_state
|
385 |
+
>>> list(last_hidden_states.shape)
|
386 |
+
[1, 5, 1280]
|
387 |
+
```"""
|
388 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
389 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
390 |
+
output_hidden_states = (
|
391 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
392 |
+
)
|
393 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
394 |
+
|
395 |
+
if input_ids is not None and inputs_embeds is not None:
|
396 |
+
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
397 |
+
elif input_ids is not None:
|
398 |
+
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
|
399 |
+
input_shape = input_ids.size()
|
400 |
+
input_ids = input_ids.view(-1, input_shape[-1])
|
401 |
+
batch_size = input_ids.shape[0]
|
402 |
+
elif inputs_embeds is not None:
|
403 |
+
input_shape = inputs_embeds.size()[:-1]
|
404 |
+
batch_size = inputs_embeds.shape[0]
|
405 |
+
else:
|
406 |
+
raise ValueError("You have to specify either input_ids or inputs_embeds")
|
407 |
+
|
408 |
+
device = input_ids.device if input_ids is not None else inputs_embeds.device
|
409 |
+
|
410 |
+
if past_key_values is None:
|
411 |
+
past_length = 0
|
412 |
+
past_key_values = tuple([None] * len(self.h))
|
413 |
+
else:
|
414 |
+
past_length = past_key_values[0][0].size(-2)
|
415 |
+
if position_ids is None:
|
416 |
+
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
|
417 |
+
position_ids = position_ids.unsqueeze(0)
|
418 |
+
|
419 |
+
# Attention mask.
|
420 |
+
if attention_mask is not None:
|
421 |
+
if batch_size <= 0:
|
422 |
+
raise ValueError("batch_size has to be defined and > 0")
|
423 |
+
attention_mask = attention_mask.view(batch_size, -1)
|
424 |
+
# We create a 3D attention mask from a 2D tensor mask.
|
425 |
+
# Sizes are [batch_size, 1, 1, to_seq_length]
|
426 |
+
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
|
427 |
+
# this attention mask is more simple than the triangular masking of causal attention
|
428 |
+
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
|
429 |
+
attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
|
430 |
+
|
431 |
+
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
|
432 |
+
# masked positions, this operation will create a tensor which is 0.0 for
|
433 |
+
# positions we want to attend and the dtype's smallest value for masked positions.
|
434 |
+
# Since we are adding it to the raw scores before the softmax, this is
|
435 |
+
# effectively the same as removing these entirely.
|
436 |
+
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
|
437 |
+
attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
|
438 |
+
|
439 |
+
# Prepare head mask if needed
|
440 |
+
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
|
441 |
+
|
442 |
+
if token_type_ids is not None:
|
443 |
+
token_type_ids = token_type_ids.view(-1, input_shape[-1])
|
444 |
+
token_type_embeds = self.w(token_type_ids)
|
445 |
+
token_type_embeds *= np.sqrt(self.d_model_size)
|
446 |
+
else:
|
447 |
+
token_type_embeds = 0
|
448 |
+
|
449 |
+
if inputs_embeds is None:
|
450 |
+
inputs_embeds = self.w(input_ids)
|
451 |
+
# inputs_embeds = embedded.unsqueeze(0) if len(input_ids.shape)<2 else embedded
|
452 |
+
seq_len = input_shape[-1]
|
453 |
+
mask = torch.triu(torch.ones(seq_len + past_length, seq_len + past_length), 1).to(device)
|
454 |
+
|
455 |
+
inputs_embeds *= np.sqrt(self.d_model_size)
|
456 |
+
|
457 |
+
# `self.pos_encoding` won't be sent to the correct device along the model, so we do it manually.
|
458 |
+
self.pos_encoding = self.pos_encoding.to(device)
|
459 |
+
pos_embeds = self.pos_encoding[position_ids, :]
|
460 |
+
|
461 |
+
hidden_states = inputs_embeds + pos_embeds + token_type_embeds
|
462 |
+
|
463 |
+
hidden_states = self.dropout(hidden_states)
|
464 |
+
|
465 |
+
presents = () if use_cache else None
|
466 |
+
all_hidden_states = () if output_hidden_states else None
|
467 |
+
all_attentions = () if output_attentions else None
|
468 |
+
for i, (h, layer_past) in enumerate(zip(self.h, past_key_values)):
|
469 |
+
if output_hidden_states:
|
470 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
471 |
+
outputs = h(
|
472 |
+
hidden_states,
|
473 |
+
mask,
|
474 |
+
layer_past=layer_past,
|
475 |
+
attention_mask=attention_mask,
|
476 |
+
head_mask=head_mask[i],
|
477 |
+
use_cache=use_cache,
|
478 |
+
output_attentions=output_attentions,
|
479 |
+
)
|
480 |
+
hidden_states, present = outputs[:2]
|
481 |
+
if use_cache is True:
|
482 |
+
presents = presents + (present,)
|
483 |
+
|
484 |
+
if output_attentions:
|
485 |
+
all_attentions += (outputs[2],)
|
486 |
+
|
487 |
+
hidden_states = self.layernorm(hidden_states)
|
488 |
+
if output_hidden_states:
|
489 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
490 |
+
|
491 |
+
if not return_dict:
|
492 |
+
return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)
|
493 |
+
|
494 |
+
return BaseModelOutputWithPast(
|
495 |
+
last_hidden_state=hidden_states,
|
496 |
+
past_key_values=presents,
|
497 |
+
hidden_states=all_hidden_states,
|
498 |
+
attentions=all_attentions,
|
499 |
+
)
|
500 |
+
|
501 |
+
|
502 |
+
@add_start_docstrings(
|
503 |
+
"""
|
504 |
+
The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input
|
505 |
+
embeddings).
|
506 |
+
""",
|
507 |
+
CTRL_START_DOCSTRING,
|
508 |
+
)
|
509 |
+
class CTRLLMHeadModel(CTRLPreTrainedModel):
|
510 |
+
_tied_weights_keys = ["lm_head.weight"]
|
511 |
+
|
512 |
+
def __init__(self, config):
|
513 |
+
super().__init__(config)
|
514 |
+
self.transformer = CTRLModel(config)
|
515 |
+
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=True)
|
516 |
+
|
517 |
+
# Initialize weights and apply final processing
|
518 |
+
self.post_init()
|
519 |
+
|
520 |
+
def get_output_embeddings(self):
|
521 |
+
return self.lm_head
|
522 |
+
|
523 |
+
def set_output_embeddings(self, new_embeddings):
|
524 |
+
self.lm_head = new_embeddings
|
525 |
+
|
526 |
+
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, use_cache=None, **kwargs):
|
527 |
+
# only last tokens for inputs_ids if past is defined in kwargs
|
528 |
+
if past_key_values is not None:
|
529 |
+
past_length = past_key_values[0][0].shape[2]
|
530 |
+
|
531 |
+
# Some generation methods already pass only the last input ID
|
532 |
+
if input_ids.shape[1] > past_length:
|
533 |
+
remove_prefix_length = past_length
|
534 |
+
else:
|
535 |
+
# Default to old behavior: keep only final ID
|
536 |
+
remove_prefix_length = input_ids.shape[1] - 1
|
537 |
+
|
538 |
+
input_ids = input_ids[:, remove_prefix_length:]
|
539 |
+
|
540 |
+
return {"input_ids": input_ids, "past_key_values": past_key_values, "use_cache": use_cache}
|
541 |
+
|
542 |
+
@add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
|
543 |
+
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
|
544 |
+
def forward(
|
545 |
+
self,
|
546 |
+
input_ids: Optional[torch.LongTensor] = None,
|
547 |
+
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
548 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
549 |
+
token_type_ids: Optional[torch.LongTensor] = None,
|
550 |
+
position_ids: Optional[torch.LongTensor] = None,
|
551 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
552 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
553 |
+
labels: Optional[torch.LongTensor] = None,
|
554 |
+
use_cache: Optional[bool] = None,
|
555 |
+
output_attentions: Optional[bool] = None,
|
556 |
+
output_hidden_states: Optional[bool] = None,
|
557 |
+
return_dict: Optional[bool] = None,
|
558 |
+
) -> Union[Tuple[torch.Tensor], CausalLMOutputWithPast]:
|
559 |
+
r"""
|
560 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
561 |
+
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
|
562 |
+
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
|
563 |
+
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
|
564 |
+
|
565 |
+
Returns:
|
566 |
+
|
567 |
+
Example:
|
568 |
+
|
569 |
+
```python
|
570 |
+
>>> import torch
|
571 |
+
>>> from transformers import AutoTokenizer, CTRLLMHeadModel
|
572 |
+
|
573 |
+
>>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
|
574 |
+
>>> model = CTRLLMHeadModel.from_pretrained("Salesforce/ctrl")
|
575 |
+
|
576 |
+
>>> # CTRL was trained with control codes as the first token
|
577 |
+
>>> inputs = tokenizer("Wikipedia The llama is", return_tensors="pt")
|
578 |
+
>>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values()
|
579 |
+
|
580 |
+
>>> sequence_ids = model.generate(inputs["input_ids"])
|
581 |
+
>>> sequences = tokenizer.batch_decode(sequence_ids)
|
582 |
+
>>> sequences
|
583 |
+
['Wikipedia The llama is a member of the family Bovidae. It is native to the Andes of Peru,']
|
584 |
+
|
585 |
+
>>> outputs = model(**inputs, labels=inputs["input_ids"])
|
586 |
+
>>> round(outputs.loss.item(), 2)
|
587 |
+
9.21
|
588 |
+
|
589 |
+
>>> list(outputs.logits.shape)
|
590 |
+
[1, 5, 246534]
|
591 |
+
```"""
|
592 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
593 |
+
|
594 |
+
transformer_outputs = self.transformer(
|
595 |
+
input_ids,
|
596 |
+
past_key_values=past_key_values,
|
597 |
+
attention_mask=attention_mask,
|
598 |
+
token_type_ids=token_type_ids,
|
599 |
+
position_ids=position_ids,
|
600 |
+
head_mask=head_mask,
|
601 |
+
inputs_embeds=inputs_embeds,
|
602 |
+
use_cache=use_cache,
|
603 |
+
output_attentions=output_attentions,
|
604 |
+
output_hidden_states=output_hidden_states,
|
605 |
+
return_dict=return_dict,
|
606 |
+
)
|
607 |
+
|
608 |
+
hidden_states = transformer_outputs[0]
|
609 |
+
|
610 |
+
lm_logits = self.lm_head(hidden_states)
|
611 |
+
|
612 |
+
loss = None
|
613 |
+
if labels is not None:
|
614 |
+
# Shift so that tokens < n predict n
|
615 |
+
shift_logits = lm_logits[..., :-1, :].contiguous()
|
616 |
+
shift_labels = labels[..., 1:].contiguous()
|
617 |
+
# Flatten the tokens
|
618 |
+
loss_fct = CrossEntropyLoss()
|
619 |
+
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
|
620 |
+
|
621 |
+
if not return_dict:
|
622 |
+
output = (lm_logits,) + transformer_outputs[1:]
|
623 |
+
return ((loss,) + output) if loss is not None else output
|
624 |
+
|
625 |
+
return CausalLMOutputWithPast(
|
626 |
+
loss=loss,
|
627 |
+
logits=lm_logits,
|
628 |
+
past_key_values=transformer_outputs.past_key_values,
|
629 |
+
hidden_states=transformer_outputs.hidden_states,
|
630 |
+
attentions=transformer_outputs.attentions,
|
631 |
+
)
|
632 |
+
|
633 |
+
@staticmethod
|
634 |
+
def _reorder_cache(
|
635 |
+
past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
|
636 |
+
) -> Tuple[Tuple[torch.Tensor]]:
|
637 |
+
"""
|
638 |
+
This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
|
639 |
+
[`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
|
640 |
+
beam_idx at every generation step.
|
641 |
+
"""
|
642 |
+
return tuple(
|
643 |
+
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
|
644 |
+
for layer_past in past_key_values
|
645 |
+
)
|
646 |
+
|
647 |
+
|
648 |
+
@add_start_docstrings(
|
649 |
+
"""
|
650 |
+
The CTRL Model transformer with a sequence classification head on top (linear layer).
|
651 |
+
[`CTRLForSequenceClassification`] uses the last token in order to do the classification, as other causal models
|
652 |
+
(e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last
|
653 |
+
token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in
|
654 |
+
each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot
|
655 |
+
guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last
|
656 |
+
value in each row of the batch).
|
657 |
+
""",
|
658 |
+
CTRL_START_DOCSTRING,
|
659 |
+
)
|
660 |
+
class CTRLForSequenceClassification(CTRLPreTrainedModel):
|
661 |
+
def __init__(self, config):
|
662 |
+
super().__init__(config)
|
663 |
+
self.num_labels = config.num_labels
|
664 |
+
self.transformer = CTRLModel(config)
|
665 |
+
self.classifier = nn.Linear(config.n_embd, self.num_labels, bias=False)
|
666 |
+
|
667 |
+
# Initialize weights and apply final processing
|
668 |
+
self.post_init()
|
669 |
+
|
670 |
+
@add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
|
671 |
+
@replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
|
672 |
+
def forward(
|
673 |
+
self,
|
674 |
+
input_ids: Optional[torch.LongTensor] = None,
|
675 |
+
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
676 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
677 |
+
token_type_ids: Optional[torch.LongTensor] = None,
|
678 |
+
position_ids: Optional[torch.LongTensor] = None,
|
679 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
680 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
681 |
+
labels: Optional[torch.LongTensor] = None,
|
682 |
+
use_cache: Optional[bool] = None,
|
683 |
+
output_attentions: Optional[bool] = None,
|
684 |
+
output_hidden_states: Optional[bool] = None,
|
685 |
+
return_dict: Optional[bool] = None,
|
686 |
+
) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
|
687 |
+
r"""
|
688 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
689 |
+
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
|
690 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
691 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
692 |
+
|
693 |
+
Returns:
|
694 |
+
|
695 |
+
Example of single-label classification:
|
696 |
+
|
697 |
+
```python
|
698 |
+
>>> import torch
|
699 |
+
>>> from transformers import AutoTokenizer, CTRLForSequenceClassification
|
700 |
+
|
701 |
+
>>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
|
702 |
+
>>> model = CTRLForSequenceClassification.from_pretrained("Salesforce/ctrl")
|
703 |
+
|
704 |
+
>>> # CTRL was trained with control codes as the first token
|
705 |
+
>>> inputs = tokenizer("Opinion My dog is cute", return_tensors="pt")
|
706 |
+
>>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values()
|
707 |
+
|
708 |
+
>>> with torch.no_grad():
|
709 |
+
... logits = model(**inputs).logits
|
710 |
+
|
711 |
+
>>> predicted_class_id = logits.argmax().item()
|
712 |
+
>>> model.config.id2label[predicted_class_id]
|
713 |
+
'LABEL_0'
|
714 |
+
```
|
715 |
+
|
716 |
+
```python
|
717 |
+
>>> import torch
|
718 |
+
|
719 |
+
>>> torch.manual_seed(42) # doctest: +IGNORE_RESULT
|
720 |
+
>>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
|
721 |
+
>>> num_labels = len(model.config.id2label)
|
722 |
+
>>> model = CTRLForSequenceClassification.from_pretrained("Salesforce/ctrl", num_labels=num_labels)
|
723 |
+
|
724 |
+
>>> labels = torch.tensor(1)
|
725 |
+
>>> loss = model(**inputs, labels=labels).loss
|
726 |
+
>>> round(loss.item(), 2)
|
727 |
+
0.93
|
728 |
+
```
|
729 |
+
|
730 |
+
Example of multi-label classification:
|
731 |
+
|
732 |
+
```python
|
733 |
+
>>> import torch
|
734 |
+
>>> from transformers import AutoTokenizer, CTRLForSequenceClassification
|
735 |
+
|
736 |
+
>>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
|
737 |
+
>>> model = CTRLForSequenceClassification.from_pretrained(
...     "Salesforce/ctrl", problem_type="multi_label_classification"
... )

>>> # CTRL was trained with control codes as the first token
>>> inputs = tokenizer("Opinion My dog is cute", return_tensors="pt")
>>> assert inputs["input_ids"][0, 0].item() in tokenizer.control_codes.values()

>>> with torch.no_grad():
...     logits = model(**inputs).logits

>>> predicted_class_id = logits.argmax().item()
>>> model.config.id2label[predicted_class_id]
'LABEL_0'
```

```python
>>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
>>> num_labels = len(model.config.id2label)
>>> model = CTRLForSequenceClassification.from_pretrained("Salesforce/ctrl", num_labels=num_labels)

>>> num_labels = len(model.config.id2label)
>>> labels = torch.nn.functional.one_hot(torch.tensor([predicted_class_id]), num_classes=num_labels).to(
...     torch.float
... )
>>> loss = model(**inputs, labels=labels).loss
>>> loss.backward()  # doctest: +IGNORE_RESULT
```"""

return_dict = return_dict if return_dict is not None else self.config.use_return_dict

transformer_outputs = self.transformer(
    input_ids,
    past_key_values=past_key_values,
    attention_mask=attention_mask,
    token_type_ids=token_type_ids,
    position_ids=position_ids,
    head_mask=head_mask,
    inputs_embeds=inputs_embeds,
    use_cache=use_cache,
    output_attentions=output_attentions,
    output_hidden_states=output_hidden_states,
    return_dict=return_dict,
)

hidden_states = transformer_outputs[0]
logits = self.classifier(hidden_states)

if input_ids is not None:
    batch_size, sequence_length = input_ids.shape[:2]
else:
    batch_size, sequence_length = inputs_embeds.shape[:2]

if self.config.pad_token_id is None and batch_size != 1:
    raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")

if self.config.pad_token_id is None:
    sequence_lengths = -1
else:
    if input_ids is not None:
        # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
        sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
        sequence_lengths = sequence_lengths % input_ids.shape[-1]
        sequence_lengths = sequence_lengths.to(logits.device)
    else:
        sequence_lengths = -1
        logger.warning(
            f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
            "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
        )

pooled_logits = logits[range(batch_size), sequence_lengths]

loss = None
if labels is not None:
    if self.config.problem_type is None:
        if self.num_labels == 1:
            self.config.problem_type = "regression"
        elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
            self.config.problem_type = "single_label_classification"
        else:
            self.config.problem_type = "multi_label_classification"

    if self.config.problem_type == "regression":
        loss_fct = MSELoss()
        if self.num_labels == 1:
            loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
        else:
            loss = loss_fct(pooled_logits, labels)
    elif self.config.problem_type == "single_label_classification":
        loss_fct = CrossEntropyLoss()
        loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
    elif self.config.problem_type == "multi_label_classification":
        loss_fct = BCEWithLogitsLoss()
        loss = loss_fct(pooled_logits, labels)
if not return_dict:
    output = (pooled_logits,) + transformer_outputs[2:]
    return ((loss,) + output) if loss is not None else output

return SequenceClassifierOutput(
    loss=loss,
    logits=pooled_logits,
    hidden_states=transformer_outputs.hidden_states,
    attentions=transformer_outputs.attentions,
)
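The pooling step above keys on the index of the last non-padding token in each row. A minimal sketch of that modulo-based lookup, with a toy batch and an assumed `pad_token_id=0` (both made up for illustration; the real value comes from the model config):

```python
import torch

pad_token_id = 0  # assumed for the demo
input_ids = torch.tensor(
    [
        [5, 7, 9, 0, 0],  # three real tokens followed by padding
        [4, 2, 6, 8, 1],  # no padding at all
    ]
)
# argmax returns the first pad position (or 0 when there is none); subtracting 1 and
# taking the modulo maps the "no pad" row to its last index instead of -1.
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
print(sequence_lengths)  # tensor([2, 4])
```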
llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/modeling_tf_ctrl.py
ADDED
@@ -0,0 +1,931 @@
# coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 CTRL model."""

from __future__ import annotations

from typing import Optional, Tuple, Union

import numpy as np
import tensorflow as tf

from ...modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast, TFSequenceClassifierOutput
from ...modeling_tf_utils import (
    TFCausalLanguageModelingLoss,
    TFModelInputType,
    TFPreTrainedModel,
    TFSequenceClassificationLoss,
    get_initializer,
    keras,
    keras_serializable,
    unpack_inputs,
)
from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_ctrl import CTRLConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "Salesforce/ctrl"
_CONFIG_FOR_DOC = "CTRLConfig"


from ..deprecated._archive_maps import TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


def angle_defn(pos, i, d_model_size):
|
51 |
+
angle_rates = 1 / np.power(10000, (2 * (i // 2)) / d_model_size)
|
52 |
+
return pos * angle_rates
|
53 |
+
|
54 |
+
|
55 |
+
def positional_encoding(position, d_model_size):
|
56 |
+
# create the sinusoidal pattern for the positional encoding
|
57 |
+
angle_rads = angle_defn(np.arange(position)[:, np.newaxis], np.arange(d_model_size)[np.newaxis, :], d_model_size)
|
58 |
+
|
59 |
+
sines = np.sin(angle_rads[:, 0::2])
|
60 |
+
cosines = np.cos(angle_rads[:, 1::2])
|
61 |
+
pos_encoding = tf.convert_to_tensor(np.concatenate([sines, cosines], axis=-1))
|
62 |
+
|
63 |
+
return pos_encoding
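A quick sketch of what `positional_encoding` produces, using toy sizes (the values below are illustrative, not taken from the config): the table has one row per position, and rows are later looked up by position id and added to the scaled token embeddings.

```python
import numpy as np

position, d_model_size = 4, 8  # toy sizes
pos = np.arange(position)[:, np.newaxis]
i = np.arange(d_model_size)[np.newaxis, :]
angle_rads = pos / np.power(10000, (2 * (i // 2)) / d_model_size)
table = np.concatenate([np.sin(angle_rads[:, 0::2]), np.cos(angle_rads[:, 1::2])], axis=-1)
print(table.shape)             # (4, 8): one row per position
print(table[[0, 1, 1]].shape)  # rows can be gathered per position id, as the main layer does
```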
|
64 |
+
|
65 |
+
|
66 |
+
def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):
|
67 |
+
# calculate attention
|
68 |
+
matmul_qk = tf.matmul(q, k, transpose_b=True)
|
69 |
+
|
70 |
+
dk = tf.cast(shape_list(k)[-1], dtype=matmul_qk.dtype)
|
71 |
+
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
|
72 |
+
|
73 |
+
if mask is not None:
|
74 |
+
scaled_attention_logits += tf.cast(mask * -1e4, dtype=scaled_attention_logits.dtype)
|
75 |
+
|
76 |
+
if attention_mask is not None:
|
77 |
+
# Apply the attention mask
|
78 |
+
attention_mask = tf.cast(attention_mask, dtype=scaled_attention_logits.dtype)
|
79 |
+
scaled_attention_logits = scaled_attention_logits + attention_mask
|
80 |
+
|
81 |
+
attention_weights = stable_softmax(scaled_attention_logits, axis=-1)
|
82 |
+
|
83 |
+
# Mask heads if we want to
|
84 |
+
if head_mask is not None:
|
85 |
+
attention_weights = attention_weights * head_mask
|
86 |
+
|
87 |
+
output = tf.matmul(attention_weights, v)
|
88 |
+
|
89 |
+
return output, attention_weights
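The `mask * -1e4` term above implements causal masking additively: disallowed positions receive a large negative logit and effectively vanish after the softmax. A small sketch with a toy sequence length (using plain `tf.nn.softmax` instead of the library's `stable_softmax`):

```python
import tensorflow as tf

seq_len = 4  # toy length
mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)  # 1 strictly above the diagonal
logits = tf.zeros((seq_len, seq_len))
masked = logits + tf.cast(mask * -1e4, logits.dtype)
probs = tf.nn.softmax(masked, axis=-1)
print(probs.numpy().round(2))  # row i spreads its weight only over positions <= i
```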
|
90 |
+
|
91 |
+
|
92 |
+
class TFMultiHeadAttention(keras.layers.Layer):
|
93 |
+
def __init__(self, d_model_size, num_heads, output_attentions=False, **kwargs):
|
94 |
+
super().__init__(**kwargs)
|
95 |
+
self.num_heads = num_heads
|
96 |
+
self.d_model_size = d_model_size
|
97 |
+
self.output_attentions = output_attentions
|
98 |
+
|
99 |
+
self.depth = int(d_model_size / self.num_heads)
|
100 |
+
|
101 |
+
self.Wq = keras.layers.Dense(d_model_size, name="Wq")
|
102 |
+
self.Wk = keras.layers.Dense(d_model_size, name="Wk")
|
103 |
+
self.Wv = keras.layers.Dense(d_model_size, name="Wv")
|
104 |
+
|
105 |
+
self.dense = keras.layers.Dense(d_model_size, name="dense")
|
106 |
+
|
107 |
+
def split_into_heads(self, x, batch_size):
|
108 |
+
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
|
109 |
+
return tf.transpose(x, perm=[0, 2, 1, 3])
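`split_into_heads` only reorders axes: `(batch, seq, d_model)` becomes `(batch, num_heads, seq, depth)` so each head attends over the sequence independently. A shape-only sketch with toy dimensions:

```python
import tensorflow as tf

batch, seq, d_model, num_heads = 2, 5, 8, 4  # toy sizes
x = tf.zeros((batch, seq, d_model))
x = tf.reshape(x, (batch, -1, num_heads, d_model // num_heads))
x = tf.transpose(x, perm=[0, 2, 1, 3])
print(x.shape)  # (2, 4, 5, 2)
```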
|
110 |
+
|
111 |
+
def call(self, v, k, q, mask, layer_past, attention_mask, head_mask, use_cache, output_attentions, training=False):
|
112 |
+
batch_size = shape_list(q)[0]
|
113 |
+
|
114 |
+
q = self.Wq(q)
|
115 |
+
k = self.Wk(k)
|
116 |
+
v = self.Wv(v)
|
117 |
+
|
118 |
+
q = self.split_into_heads(q, batch_size)
|
119 |
+
k = self.split_into_heads(k, batch_size)
|
120 |
+
v = self.split_into_heads(v, batch_size)
|
121 |
+
|
122 |
+
if layer_past is not None:
|
123 |
+
past_key, past_value = tf.unstack(layer_past, axis=0)
|
124 |
+
k = tf.concat((past_key, k), axis=-2)
|
125 |
+
v = tf.concat((past_value, v), axis=-2)
|
126 |
+
|
127 |
+
if use_cache:
|
128 |
+
present = tf.stack((k, v), axis=0)
|
129 |
+
else:
|
130 |
+
present = (None,)
|
131 |
+
|
132 |
+
output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask)
|
133 |
+
scaled_attention = tf.transpose(output[0], perm=[0, 2, 1, 3])
|
134 |
+
attn = output[1]
|
135 |
+
original_size_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model_size))
|
136 |
+
output = self.dense(original_size_attention)
|
137 |
+
outputs = (output, present)
|
138 |
+
|
139 |
+
if output_attentions:
|
140 |
+
outputs = outputs + (attn,)
|
141 |
+
|
142 |
+
return outputs
|
143 |
+
|
144 |
+
def build(self, input_shape=None):
|
145 |
+
if self.built:
|
146 |
+
return
|
147 |
+
self.built = True
|
148 |
+
if getattr(self, "Wq", None) is not None:
|
149 |
+
with tf.name_scope(self.Wq.name):
|
150 |
+
self.Wq.build([None, None, self.d_model_size])
|
151 |
+
if getattr(self, "Wk", None) is not None:
|
152 |
+
with tf.name_scope(self.Wk.name):
|
153 |
+
self.Wk.build([None, None, self.d_model_size])
|
154 |
+
if getattr(self, "Wv", None) is not None:
|
155 |
+
with tf.name_scope(self.Wv.name):
|
156 |
+
self.Wv.build([None, None, self.d_model_size])
|
157 |
+
if getattr(self, "dense", None) is not None:
|
158 |
+
with tf.name_scope(self.dense.name):
|
159 |
+
self.dense.build([None, None, self.d_model_size])
|
160 |
+
|
161 |
+
|
162 |
+
class TFPointWiseFeedForwardLayer(keras.layers.Layer):
|
163 |
+
def __init__(self, d_model_size, dff, **kwargs):
|
164 |
+
super().__init__(**kwargs)
|
165 |
+
|
166 |
+
self.dense_0 = keras.layers.Dense(dff, activation="relu", name="0")
|
167 |
+
self.dense_2 = keras.layers.Dense(d_model_size, name="2")
|
168 |
+
self.d_model_size = d_model_size
|
169 |
+
self.dff = dff
|
170 |
+
|
171 |
+
def call(self, inputs, trainable=False):
|
172 |
+
dense_0_output = self.dense_0(inputs)
|
173 |
+
dense_2_output = self.dense_2(dense_0_output)
|
174 |
+
|
175 |
+
return dense_2_output
|
176 |
+
|
177 |
+
def build(self, input_shape=None):
|
178 |
+
if self.built:
|
179 |
+
return
|
180 |
+
self.built = True
|
181 |
+
if getattr(self, "dense_0", None) is not None:
|
182 |
+
with tf.name_scope(self.dense_0.name):
|
183 |
+
self.dense_0.build([None, None, self.d_model_size])
|
184 |
+
if getattr(self, "dense_2", None) is not None:
|
185 |
+
with tf.name_scope(self.dense_2.name):
|
186 |
+
self.dense_2.build([None, None, self.dff])
|
187 |
+
|
188 |
+
|
189 |
+
class TFEncoderLayer(keras.layers.Layer):
|
190 |
+
def __init__(
|
191 |
+
self, d_model_size, num_heads, dff, rate=0.1, layer_norm_epsilon=1e-6, output_attentions=False, **kwargs
|
192 |
+
):
|
193 |
+
super().__init__(**kwargs)
|
194 |
+
|
195 |
+
self.output_attentions = output_attentions
|
196 |
+
|
197 |
+
self.multi_head_attention = TFMultiHeadAttention(
|
198 |
+
d_model_size, num_heads, output_attentions=self.output_attentions, name="multi_head_attention"
|
199 |
+
)
|
200 |
+
self.ffn = TFPointWiseFeedForwardLayer(d_model_size, dff, name="ffn")
|
201 |
+
|
202 |
+
self.layernorm1 = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layernorm1")
|
203 |
+
self.layernorm2 = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layernorm2")
|
204 |
+
|
205 |
+
self.dropout1 = keras.layers.Dropout(rate)
|
206 |
+
self.dropout2 = keras.layers.Dropout(rate)
|
207 |
+
self.d_model_size = d_model_size
|
208 |
+
|
209 |
+
def call(self, x, mask, layer_past, attention_mask, head_mask, use_cache, output_attentions, training=False):
|
210 |
+
normed = self.layernorm1(x)
|
211 |
+
attn_outputs = self.multi_head_attention(
|
212 |
+
normed,
|
213 |
+
normed,
|
214 |
+
normed,
|
215 |
+
mask,
|
216 |
+
layer_past,
|
217 |
+
attention_mask,
|
218 |
+
head_mask,
|
219 |
+
use_cache,
|
220 |
+
output_attentions,
|
221 |
+
training=training,
|
222 |
+
)
|
223 |
+
attn_output = attn_outputs[0]
|
224 |
+
attn_output = self.dropout1(attn_output, training=training)
|
225 |
+
out1 = x + attn_output
|
226 |
+
|
227 |
+
out2 = self.layernorm2(out1)
|
228 |
+
ffn_output = self.ffn(out2)
|
229 |
+
ffn_output = self.dropout2(ffn_output, training=training)
|
230 |
+
out2 = out1 + ffn_output
|
231 |
+
|
232 |
+
outputs = (out2,) + attn_outputs[1:]
|
233 |
+
return outputs
|
234 |
+
|
235 |
+
def build(self, input_shape=None):
|
236 |
+
if self.built:
|
237 |
+
return
|
238 |
+
self.built = True
|
239 |
+
if getattr(self, "multi_head_attention", None) is not None:
|
240 |
+
with tf.name_scope(self.multi_head_attention.name):
|
241 |
+
self.multi_head_attention.build(None)
|
242 |
+
if getattr(self, "ffn", None) is not None:
|
243 |
+
with tf.name_scope(self.ffn.name):
|
244 |
+
self.ffn.build(None)
|
245 |
+
if getattr(self, "layernorm1", None) is not None:
|
246 |
+
with tf.name_scope(self.layernorm1.name):
|
247 |
+
self.layernorm1.build([None, None, self.d_model_size])
|
248 |
+
if getattr(self, "layernorm2", None) is not None:
|
249 |
+
with tf.name_scope(self.layernorm2.name):
|
250 |
+
self.layernorm2.build([None, None, self.d_model_size])
|
251 |
+
|
252 |
+
|
253 |
+
@keras_serializable
|
254 |
+
class TFCTRLMainLayer(keras.layers.Layer):
|
255 |
+
config_class = CTRLConfig
|
256 |
+
|
257 |
+
def __init__(self, config, **kwargs):
|
258 |
+
super().__init__(**kwargs)
|
259 |
+
|
260 |
+
self.config = config
|
261 |
+
self.output_hidden_states = config.output_hidden_states
|
262 |
+
self.output_attentions = config.output_attentions
|
263 |
+
self.use_cache = config.use_cache
|
264 |
+
self.return_dict = config.use_return_dict
|
265 |
+
|
266 |
+
self.d_model_size = config.n_embd
|
267 |
+
self.num_layers = config.n_layer
|
268 |
+
|
269 |
+
self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size)
|
270 |
+
|
271 |
+
self.w = keras.layers.Embedding(
|
272 |
+
input_dim=config.vocab_size,
|
273 |
+
output_dim=config.n_embd,
|
274 |
+
embeddings_initializer=get_initializer(config.initializer_range),
|
275 |
+
name="w",
|
276 |
+
)
|
277 |
+
|
278 |
+
self.dropout = keras.layers.Dropout(config.embd_pdrop)
|
279 |
+
self.h = [
|
280 |
+
TFEncoderLayer(
|
281 |
+
config.n_embd,
|
282 |
+
config.n_head,
|
283 |
+
config.dff,
|
284 |
+
config.resid_pdrop,
|
285 |
+
config.layer_norm_epsilon,
|
286 |
+
self.output_attentions,
|
287 |
+
name=f"h_._{i}",
|
288 |
+
)
|
289 |
+
for i in range(config.n_layer)
|
290 |
+
]
|
291 |
+
self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="layernorm")
|
292 |
+
|
293 |
+
def get_input_embeddings(self):
|
294 |
+
return self.w
|
295 |
+
|
296 |
+
def set_input_embeddings(self, new_embeddings):
|
297 |
+
self.w = new_embeddings
|
298 |
+
|
299 |
+
def _prune_heads(self, heads_to_prune):
|
300 |
+
"""
|
301 |
+
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
|
302 |
+
"""
|
303 |
+
raise NotImplementedError
|
304 |
+
|
305 |
+
@unpack_inputs
|
306 |
+
def call(
|
307 |
+
self,
|
308 |
+
input_ids: TFModelInputType | None = None,
|
309 |
+
past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
|
310 |
+
attention_mask: np.ndarray | tf.Tensor | None = None,
|
311 |
+
token_type_ids: np.ndarray | tf.Tensor | None = None,
|
312 |
+
position_ids: np.ndarray | tf.Tensor | None = None,
|
313 |
+
head_mask: np.ndarray | tf.Tensor | None = None,
|
314 |
+
inputs_embeds: np.ndarray | tf.Tensor | None = None,
|
315 |
+
use_cache: Optional[bool] = None,
|
316 |
+
output_attentions: Optional[bool] = None,
|
317 |
+
output_hidden_states: Optional[bool] = None,
|
318 |
+
return_dict: Optional[bool] = None,
|
319 |
+
training: Optional[bool] = False,
|
320 |
+
) -> Union[Tuple, TFBaseModelOutputWithPast]:
|
321 |
+
# If using past key value states, only the last tokens
|
322 |
+
# should be given as an input
|
323 |
+
if past_key_values is not None:
|
324 |
+
if input_ids is not None:
|
325 |
+
input_ids = input_ids[:, -1:]
|
326 |
+
if inputs_embeds is not None:
|
327 |
+
inputs_embeds = inputs_embeds[:, -1:]
|
328 |
+
if token_type_ids is not None:
|
329 |
+
token_type_ids = token_type_ids[:, -1:]
|
330 |
+
|
331 |
+
if input_ids is not None and inputs_embeds is not None:
|
332 |
+
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
333 |
+
elif input_ids is not None:
|
334 |
+
input_shape = shape_list(input_ids)
|
335 |
+
input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
|
336 |
+
elif inputs_embeds is not None:
|
337 |
+
input_shape = shape_list(inputs_embeds)[:-1]
|
338 |
+
else:
|
339 |
+
raise ValueError("You have to specify either input_ids or inputs_embeds")
|
340 |
+
|
341 |
+
if past_key_values is None:
|
342 |
+
past_length = 0
|
343 |
+
past_key_values = [None] * len(self.h)
|
344 |
+
else:
|
345 |
+
past_length = shape_list(past_key_values[0][0])[-2]
|
346 |
+
if position_ids is None:
|
347 |
+
position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length, dtype=tf.int32), axis=0)
|
348 |
+
position_ids = tf.tile(position_ids, [input_shape[0], 1])
|
349 |
+
|
350 |
+
# Attention mask.
|
351 |
+
if attention_mask is not None:
|
352 |
+
# We create a 3D attention mask from a 2D tensor mask.
|
353 |
+
# Sizes are [batch_size, 1, 1, to_seq_length]
|
354 |
+
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
|
355 |
+
# this attention mask is more simple than the triangular masking of causal attention
|
356 |
+
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
|
357 |
+
attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1] + past_length))
|
358 |
+
|
359 |
+
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
|
360 |
+
# masked positions, this operation will create a tensor which is 0.0 for
|
361 |
+
# positions we want to attend and -10000.0 for masked positions.
|
362 |
+
# Since we are adding it to the raw scores before the softmax, this is
|
363 |
+
# effectively the same as removing these entirely.
|
364 |
+
|
365 |
+
one_cst = tf.constant(1.0)
|
366 |
+
ten_thousand_cst = tf.constant(-10000.0)
|
367 |
+
attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype)
|
368 |
+
attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), ten_thousand_cst)
|
369 |
+
|
370 |
+
# Prepare head mask if needed
|
371 |
+
# 1.0 in head_mask indicate we keep the head
|
372 |
+
# attention_probs has shape bsz x n_heads x N x N
|
373 |
+
# head_mask has shape n_layer x batch x n_heads x N x N
|
374 |
+
if head_mask is not None:
|
375 |
+
raise NotImplementedError
|
376 |
+
else:
|
377 |
+
head_mask = [None] * self.num_layers
|
378 |
+
|
379 |
+
if token_type_ids is not None:
|
380 |
+
token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
|
381 |
+
token_type_embeds = self.w(token_type_ids)
|
382 |
+
token_type_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, dtype=token_type_embeds.dtype))
|
383 |
+
else:
|
384 |
+
token_type_embeds = tf.constant(0.0)
|
385 |
+
position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
|
386 |
+
|
387 |
+
if inputs_embeds is None:
|
388 |
+
check_embeddings_within_bounds(input_ids, self.w.input_dim)
|
389 |
+
inputs_embeds = self.w(input_ids)
|
390 |
+
seq_len = input_shape[-1]
|
391 |
+
mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)
|
392 |
+
|
393 |
+
inputs_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, inputs_embeds.dtype))
|
394 |
+
|
395 |
+
pos_embeds = tf.gather(self.pos_encoding, position_ids)
|
396 |
+
pos_embeds = tf.cast(pos_embeds, dtype=token_type_embeds.dtype)
|
397 |
+
hidden_states = inputs_embeds + pos_embeds + token_type_embeds
|
398 |
+
|
399 |
+
hidden_states = self.dropout(hidden_states, training=training)
|
400 |
+
|
401 |
+
output_shape = input_shape + [shape_list(hidden_states)[-1]]
|
402 |
+
presents = () if use_cache else None
|
403 |
+
all_hidden_states = () if output_hidden_states else None
|
404 |
+
all_attentions = () if output_attentions else None
|
405 |
+
for i, (h, layer_past) in enumerate(zip(self.h, past_key_values)):
|
406 |
+
if output_hidden_states:
|
407 |
+
all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
|
408 |
+
outputs = h(
|
409 |
+
hidden_states,
|
410 |
+
mask,
|
411 |
+
layer_past,
|
412 |
+
attention_mask,
|
413 |
+
head_mask[i],
|
414 |
+
use_cache,
|
415 |
+
output_attentions,
|
416 |
+
training=training,
|
417 |
+
)
|
418 |
+
hidden_states, present = outputs[:2]
|
419 |
+
|
420 |
+
if use_cache:
|
421 |
+
presents = presents + (present,)
|
422 |
+
|
423 |
+
if output_attentions:
|
424 |
+
all_attentions = all_attentions + (outputs[2],)
|
425 |
+
|
426 |
+
hidden_states = self.layernorm(hidden_states)
|
427 |
+
hidden_states = tf.reshape(hidden_states, output_shape)
|
428 |
+
if output_hidden_states:
|
429 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
430 |
+
|
431 |
+
if output_attentions:
|
432 |
+
# let the number of heads free (-1) so we can extract attention even after head pruning
|
433 |
+
attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
|
434 |
+
all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
|
435 |
+
|
436 |
+
if not return_dict:
|
437 |
+
return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)
|
438 |
+
|
439 |
+
return TFBaseModelOutputWithPast(
|
440 |
+
last_hidden_state=hidden_states,
|
441 |
+
past_key_values=presents,
|
442 |
+
hidden_states=all_hidden_states,
|
443 |
+
attentions=all_attentions,
|
444 |
+
)
|
445 |
+
|
446 |
+
def build(self, input_shape=None):
|
447 |
+
if self.built:
|
448 |
+
return
|
449 |
+
self.built = True
|
450 |
+
if getattr(self, "w", None) is not None:
|
451 |
+
with tf.name_scope(self.w.name):
|
452 |
+
self.w.build(None)
|
453 |
+
if getattr(self, "layernorm", None) is not None:
|
454 |
+
with tf.name_scope(self.layernorm.name):
|
455 |
+
self.layernorm.build([None, None, self.config.n_embd])
|
456 |
+
if getattr(self, "h", None) is not None:
|
457 |
+
for layer in self.h:
|
458 |
+
with tf.name_scope(layer.name):
|
459 |
+
layer.build(None)
|
460 |
+
|
461 |
+
|
462 |
+
class TFCTRLPreTrainedModel(TFPreTrainedModel):
|
463 |
+
"""
|
464 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
465 |
+
models.
|
466 |
+
"""
|
467 |
+
|
468 |
+
config_class = CTRLConfig
|
469 |
+
base_model_prefix = "transformer"
|
470 |
+
|
471 |
+
|
472 |
+
CTRL_START_DOCSTRING = r"""
|
473 |
+
|
474 |
+
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
|
475 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
476 |
+
etc.)
|
477 |
+
|
478 |
+
This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
|
479 |
+
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
|
480 |
+
behavior.
|
481 |
+
|
482 |
+
<Tip>
|
483 |
+
|
484 |
+
TensorFlow models and layers in `transformers` accept two formats as input:
|
485 |
+
|
486 |
+
- having all inputs as keyword arguments (like PyTorch models), or
|
487 |
+
- having all inputs as a list, tuple or dict in the first positional argument.
|
488 |
+
|
489 |
+
The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
|
490 |
+
and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
|
491 |
+
pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
|
492 |
+
format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
|
493 |
+
the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
|
494 |
+
positional argument:
|
495 |
+
|
496 |
+
- a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
|
497 |
+
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
|
498 |
+
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
|
499 |
+
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
|
500 |
+
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
|
501 |
+
|
502 |
+
Note that when creating models and layers with
|
503 |
+
[subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
|
504 |
+
about any of this, as you can just pass inputs like you would to any other Python function!
|
505 |
+
|
506 |
+
</Tip>
|
507 |
+
|
508 |
+
Parameters:
|
509 |
+
config ([`CTRLConfig`]): Model configuration class with all the parameters of the model.
|
510 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
511 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
512 |
+
"""
|
513 |
+
|
514 |
+
CTRL_INPUTS_DOCSTRING = r"""
|
515 |
+
Args:
|
516 |
+
input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`):
|
517 |
+
`input_ids_length` = `sequence_length` if `past` is `None` else `past[0].shape[-2]` (`sequence_length` of
|
518 |
+
input past key value states).
|
519 |
+
|
520 |
+
Indices of input sequence tokens in the vocabulary.
|
521 |
+
|
522 |
+
If `past` is used, only input IDs that do not have their past calculated should be passed as `input_ids`.
|
523 |
+
|
524 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
|
525 |
+
[`PreTrainedTokenizer.encode`] for details.
|
526 |
+
|
527 |
+
[What are input IDs?](../glossary#input-ids)
|
528 |
+
past (`List[tf.Tensor]` of length `config.n_layers`):
|
529 |
+
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
|
530 |
+
`past` output below). Can be used to speed up sequential decoding. The token ids which have their past
|
531 |
+
given to this model should not be passed as input ids as they have already been computed.
|
532 |
+
attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
|
533 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
534 |
+
|
535 |
+
- 1 for tokens that are **not masked**,
|
536 |
+
- 0 for tokens that are **masked**.
|
537 |
+
|
538 |
+
[What are attention masks?](../glossary#attention-mask)
|
539 |
+
token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
|
540 |
+
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
|
541 |
+
1]`:
|
542 |
+
|
543 |
+
- 0 corresponds to a *sentence A* token,
|
544 |
+
- 1 corresponds to a *sentence B* token.
|
545 |
+
|
546 |
+
[What are token type IDs?](../glossary#token-type-ids)
|
547 |
+
position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
|
548 |
+
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
549 |
+
config.max_position_embeddings - 1]`.
|
550 |
+
|
551 |
+
[What are position IDs?](../glossary#position-ids)
|
552 |
+
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
|
553 |
+
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
|
554 |
+
|
555 |
+
- 1 indicates the head is **not masked**,
|
556 |
+
- 0 indicates the head is **masked**.
|
557 |
+
|
558 |
+
inputs_embeds (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
559 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
560 |
+
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
561 |
+
model's internal embedding lookup matrix.
|
562 |
+
use_cache (`bool`, *optional*):
|
563 |
+
If set to `True`, `past` key value states are returned and can be used to speed up decoding (see `past`).
|
564 |
+
output_attentions (`bool`, *optional*):
|
565 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
566 |
+
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
|
567 |
+
config will be used instead.
|
568 |
+
output_hidden_states (`bool`, *optional*):
|
569 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
570 |
+
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
|
571 |
+
used instead.
|
572 |
+
return_dict (`bool`, *optional*):
|
573 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
|
574 |
+
eager mode, in graph mode the value will always be set to True.
|
575 |
+
training (`bool`, *optional*, defaults to `False`):
|
576 |
+
Whether or not to use the model in training mode (some modules like dropout modules have different
|
577 |
+
behaviors between training and evaluation).
|
578 |
+
"""
|
579 |
+
|
580 |
+
|
581 |
+
@add_start_docstrings(
|
582 |
+
"The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.",
|
583 |
+
CTRL_START_DOCSTRING,
|
584 |
+
)
|
585 |
+
class TFCTRLModel(TFCTRLPreTrainedModel):
|
586 |
+
def __init__(self, config, *inputs, **kwargs):
|
587 |
+
super().__init__(config, *inputs, **kwargs)
|
588 |
+
self.transformer = TFCTRLMainLayer(config, name="transformer")
|
589 |
+
|
590 |
+
@unpack_inputs
|
591 |
+
@add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
|
592 |
+
@add_code_sample_docstrings(
|
593 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
594 |
+
output_type=TFBaseModelOutputWithPast,
|
595 |
+
config_class=_CONFIG_FOR_DOC,
|
596 |
+
)
|
597 |
+
def call(
|
598 |
+
self,
|
599 |
+
input_ids: TFModelInputType | None = None,
|
600 |
+
past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
|
601 |
+
attention_mask: np.ndarray | tf.Tensor | None = None,
|
602 |
+
token_type_ids: np.ndarray | tf.Tensor | None = None,
|
603 |
+
position_ids: np.ndarray | tf.Tensor | None = None,
|
604 |
+
head_mask: np.ndarray | tf.Tensor | None = None,
|
605 |
+
inputs_embeds: np.ndarray | tf.Tensor | None = None,
|
606 |
+
use_cache: Optional[bool] = None,
|
607 |
+
output_attentions: Optional[bool] = None,
|
608 |
+
output_hidden_states: Optional[bool] = None,
|
609 |
+
return_dict: Optional[bool] = None,
|
610 |
+
training: Optional[bool] = False,
|
611 |
+
) -> Union[Tuple, TFBaseModelOutputWithPast]:
|
612 |
+
outputs = self.transformer(
|
613 |
+
input_ids=input_ids,
|
614 |
+
past_key_values=past_key_values,
|
615 |
+
attention_mask=attention_mask,
|
616 |
+
token_type_ids=token_type_ids,
|
617 |
+
position_ids=position_ids,
|
618 |
+
head_mask=head_mask,
|
619 |
+
inputs_embeds=inputs_embeds,
|
620 |
+
use_cache=use_cache,
|
621 |
+
output_attentions=output_attentions,
|
622 |
+
output_hidden_states=output_hidden_states,
|
623 |
+
return_dict=return_dict,
|
624 |
+
training=training,
|
625 |
+
)
|
626 |
+
return outputs
|
627 |
+
|
628 |
+
def build(self, input_shape=None):
|
629 |
+
if self.built:
|
630 |
+
return
|
631 |
+
self.built = True
|
632 |
+
if getattr(self, "transformer", None) is not None:
|
633 |
+
with tf.name_scope(self.transformer.name):
|
634 |
+
self.transformer.build(None)
|
635 |
+
|
636 |
+
|
637 |
+
class TFCTRLBiasLayer(keras.layers.Layer):
|
638 |
+
"""
|
639 |
+
Bias as a layer. It is used for serialization purposes: `keras.Model.save_weights` stores on a per-layer basis,
|
640 |
+
so all weights have to be registered in a layer.
|
641 |
+
"""
|
642 |
+
|
643 |
+
def __init__(self, shape, initializer, trainable, name, **kwargs):
|
644 |
+
super().__init__(name=name, **kwargs)
|
645 |
+
self.shape = shape
|
646 |
+
self.initializer = initializer
|
647 |
+
self.trainable = trainable
|
648 |
+
|
649 |
+
def build(self, input_shape):
|
650 |
+
self.bias = self.add_weight(
|
651 |
+
name="bias", shape=self.shape, initializer=self.initializer, trainable=self.trainable
|
652 |
+
)
|
653 |
+
super().build(input_shape)
|
654 |
+
|
655 |
+
def call(self, x):
|
656 |
+
return x + self.bias
|
657 |
+
|
658 |
+
|
659 |
+
@add_start_docstrings(
|
660 |
+
"""
|
661 |
+
The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input
|
662 |
+
embeddings).
|
663 |
+
""",
|
664 |
+
CTRL_START_DOCSTRING,
|
665 |
+
)
|
666 |
+
class TFCTRLLMHeadModel(TFCTRLPreTrainedModel, TFCausalLanguageModelingLoss):
|
667 |
+
def __init__(self, config, *inputs, **kwargs):
|
668 |
+
super().__init__(config, *inputs, **kwargs)
|
669 |
+
self.transformer = TFCTRLMainLayer(config, name="transformer")
|
670 |
+
self.bias_layer = TFCTRLBiasLayer(
|
671 |
+
name="lm_head", shape=[1, config.vocab_size], initializer="zeros", trainable=True
|
672 |
+
)
|
673 |
+
|
674 |
+
def get_output_embeddings(self):
|
675 |
+
return self.get_input_embeddings()
|
676 |
+
|
677 |
+
def set_output_embeddings(self, value):
|
678 |
+
self.set_input_embeddings(value)
|
679 |
+
|
680 |
+
def get_bias(self):
|
681 |
+
return {"lm_head.bias": self.bias_layer.bias}
|
682 |
+
|
683 |
+
def set_bias(self, value):
|
684 |
+
# Replaces the existing layers containing bias for correct (de)serialization.
|
685 |
+
vocab_size = value["lm_head.bias"].shape[-1]
|
686 |
+
self.bias_layer = TFCTRLBiasLayer(
|
687 |
+
name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=True
|
688 |
+
)
|
689 |
+
self.bias_layer.build(None)
|
690 |
+
self.bias_layer.bias.assign(value["lm_head.bias"])
|
691 |
+
|
692 |
+
# Copied from transformers.models.gpt2.modeling_tf_gpt2.TFGPT2LMHeadModel.prepare_inputs_for_generation
|
693 |
+
def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs):
|
694 |
+
token_type_ids = kwargs.get("token_type_ids", None)
|
695 |
+
# only last token for inputs_ids if past is defined in kwargs
|
696 |
+
if past_key_values:
|
697 |
+
inputs = tf.expand_dims(inputs[:, -1], -1)
|
698 |
+
if token_type_ids is not None:
|
699 |
+
token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1)
|
700 |
+
|
701 |
+
position_ids = kwargs.get("position_ids", None)
|
702 |
+
attention_mask = kwargs.get("attention_mask", None)
|
703 |
+
|
704 |
+
if attention_mask is not None and position_ids is None:
|
705 |
+
position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)
|
706 |
+
if past_key_values:
|
707 |
+
position_ids = tf.expand_dims(position_ids[:, -1], -1)
|
708 |
+
|
709 |
+
return {
|
710 |
+
"input_ids": inputs,
|
711 |
+
"attention_mask": attention_mask,
|
712 |
+
"position_ids": position_ids,
|
713 |
+
"past_key_values": past_key_values,
|
714 |
+
"use_cache": use_cache,
|
715 |
+
"token_type_ids": token_type_ids,
|
716 |
+
}
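When only an `attention_mask` is supplied, `prepare_inputs_for_generation` derives positions with an exclusive cumulative sum, so a left-padded row starts counting at its first real token. A toy sketch:

```python
import tensorflow as tf

attention_mask = tf.constant([[1, 1, 1, 1], [0, 0, 1, 1]])  # second row is left-padded
position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)
print(position_ids.numpy())  # [[0 1 2 3]
                             #  [0 0 0 1]]
```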
|
717 |
+
|
718 |
+
@unpack_inputs
|
719 |
+
@add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
|
720 |
+
@add_code_sample_docstrings(
|
721 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
722 |
+
output_type=TFCausalLMOutputWithPast,
|
723 |
+
config_class=_CONFIG_FOR_DOC,
|
724 |
+
)
|
725 |
+
def call(
|
726 |
+
self,
|
727 |
+
input_ids: TFModelInputType | None = None,
|
728 |
+
past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
|
729 |
+
attention_mask: np.ndarray | tf.Tensor | None = None,
|
730 |
+
token_type_ids: np.ndarray | tf.Tensor | None = None,
|
731 |
+
position_ids: np.ndarray | tf.Tensor | None = None,
|
732 |
+
head_mask: np.ndarray | tf.Tensor | None = None,
|
733 |
+
inputs_embeds: np.ndarray | tf.Tensor | None = None,
|
734 |
+
use_cache: Optional[bool] = None,
|
735 |
+
output_attentions: Optional[bool] = None,
|
736 |
+
output_hidden_states: Optional[bool] = None,
|
737 |
+
return_dict: Optional[bool] = None,
|
738 |
+
labels: np.ndarray | tf.Tensor | None = None,
|
739 |
+
training: Optional[bool] = False,
|
740 |
+
) -> Union[Tuple, TFCausalLMOutputWithPast]:
|
741 |
+
r"""
|
742 |
+
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
743 |
+
Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
|
744 |
+
config.vocab_size - 1]`.
|
745 |
+
"""
|
746 |
+
transformer_outputs = self.transformer(
|
747 |
+
input_ids=input_ids,
|
748 |
+
past_key_values=past_key_values,
|
749 |
+
attention_mask=attention_mask,
|
750 |
+
token_type_ids=token_type_ids,
|
751 |
+
position_ids=position_ids,
|
752 |
+
head_mask=head_mask,
|
753 |
+
inputs_embeds=inputs_embeds,
|
754 |
+
use_cache=use_cache,
|
755 |
+
output_attentions=output_attentions,
|
756 |
+
output_hidden_states=output_hidden_states,
|
757 |
+
return_dict=return_dict,
|
758 |
+
training=training,
|
759 |
+
)
|
760 |
+
hidden_states = transformer_outputs[0]
|
761 |
+
logits = tf.matmul(hidden_states, self.transformer.w.weights, transpose_b=True)
|
762 |
+
logits = self.bias_layer(logits)
|
763 |
+
|
764 |
+
loss = None
|
765 |
+
if labels is not None:
|
766 |
+
# shift labels to the left and cut last logit token
|
767 |
+
shifted_logits = logits[:, :-1]
|
768 |
+
labels = labels[:, 1:]
|
769 |
+
loss = self.hf_compute_loss(labels, shifted_logits)
|
770 |
+
|
771 |
+
if not return_dict:
|
772 |
+
output = (logits,) + transformer_outputs[1:]
|
773 |
+
return ((loss,) + output) if loss is not None else output
|
774 |
+
|
775 |
+
return TFCausalLMOutputWithPast(
|
776 |
+
loss=loss,
|
777 |
+
logits=logits,
|
778 |
+
past_key_values=transformer_outputs.past_key_values,
|
779 |
+
hidden_states=transformer_outputs.hidden_states,
|
780 |
+
attentions=transformer_outputs.attentions,
|
781 |
+
)
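The label handling above shifts by one time step: the logits at position t are scored against the token at t + 1, so the last logit column and the first label are dropped. A shape-only sketch with a toy vocabulary size:

```python
import tensorflow as tf

labels = tf.constant([[11, 12, 13, 14]])
logits = tf.random.normal((1, 4, 50))  # (batch, seq, vocab); 50 is a toy vocab size
shifted_logits = logits[:, :-1]        # positions 0..2 predict tokens 1..3
shifted_labels = labels[:, 1:]
print(shifted_logits.shape, shifted_labels.shape)  # (1, 3, 50) (1, 3)
```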
|
782 |
+
|
783 |
+
def build(self, input_shape=None):
|
784 |
+
if self.built:
|
785 |
+
return
|
786 |
+
self.built = True
|
787 |
+
if getattr(self, "transformer", None) is not None:
|
788 |
+
with tf.name_scope(self.transformer.name):
|
789 |
+
self.transformer.build(None)
|
790 |
+
if getattr(self, "bias_layer", None) is not None:
|
791 |
+
with tf.name_scope(self.bias_layer.name):
|
792 |
+
self.bias_layer.build(None)
|
793 |
+
|
794 |
+
|
795 |
+
@add_start_docstrings(
    """
    The CTRL Model transformer with a sequence classification head on top (linear layer).

    [`TFCTRLForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-1, GPT-2) do.

    Since it does classification on the last token, it needs to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the last value in
    each row of the batch).
    """,
    CTRL_START_DOCSTRING,
)
|
810 |
+
class TFCTRLForSequenceClassification(TFCTRLPreTrainedModel, TFSequenceClassificationLoss):
|
811 |
+
def __init__(self, config, *inputs, **kwargs):
|
812 |
+
super().__init__(config, *inputs, **kwargs)
|
813 |
+
self.num_labels = config.num_labels
|
814 |
+
self.classifier = keras.layers.Dense(
|
815 |
+
config.num_labels,
|
816 |
+
kernel_initializer=get_initializer(config.initializer_range),
|
817 |
+
name="classifier",
|
818 |
+
use_bias=False,
|
819 |
+
)
|
820 |
+
self.transformer = TFCTRLMainLayer(config, name="transformer")
|
821 |
+
self.config = config
|
822 |
+
|
823 |
+
def get_output_embeddings(self):
|
824 |
+
# Remove after transformers v4.32. Fix this model's `test_model_common_attributes` test too.
|
825 |
+
logger.warning(
|
826 |
+
"Sequence classification models do not have output embeddings. `.get_output_embeddings` will be removed "
|
827 |
+
"in transformers v4.32."
|
828 |
+
)
|
829 |
+
return self.transformer.w
|
830 |
+
|
831 |
+
@unpack_inputs
|
832 |
+
@add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
|
833 |
+
@add_code_sample_docstrings(
|
834 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
835 |
+
output_type=TFSequenceClassifierOutput,
|
836 |
+
config_class=_CONFIG_FOR_DOC,
|
837 |
+
)
|
838 |
+
def call(
|
839 |
+
self,
|
840 |
+
input_ids: TFModelInputType | None = None,
|
841 |
+
past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
|
842 |
+
attention_mask: np.ndarray | tf.Tensor | None = None,
|
843 |
+
token_type_ids: np.ndarray | tf.Tensor | None = None,
|
844 |
+
position_ids: np.ndarray | tf.Tensor | None = None,
|
845 |
+
head_mask: np.ndarray | tf.Tensor | None = None,
|
846 |
+
inputs_embeds: np.ndarray | tf.Tensor | None = None,
|
847 |
+
use_cache: Optional[bool] = None,
|
848 |
+
output_attentions: Optional[bool] = None,
|
849 |
+
output_hidden_states: Optional[bool] = None,
|
850 |
+
return_dict: Optional[bool] = None,
|
851 |
+
labels: np.ndarray | tf.Tensor | None = None,
|
852 |
+
training: Optional[bool] = False,
|
853 |
+
) -> Union[Tuple, TFSequenceClassifierOutput]:
|
854 |
+
r"""
|
855 |
+
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
856 |
+
Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
|
857 |
+
config.vocab_size - 1]`.
|
858 |
+
"""
|
859 |
+
|
860 |
+
transformer_outputs = self.transformer(
|
861 |
+
input_ids=input_ids,
|
862 |
+
past_key_values=past_key_values,
|
863 |
+
attention_mask=attention_mask,
|
864 |
+
token_type_ids=token_type_ids,
|
865 |
+
position_ids=position_ids,
|
866 |
+
head_mask=head_mask,
|
867 |
+
inputs_embeds=inputs_embeds,
|
868 |
+
use_cache=use_cache,
|
869 |
+
output_attentions=output_attentions,
|
870 |
+
output_hidden_states=output_hidden_states,
|
871 |
+
return_dict=return_dict,
|
872 |
+
training=training,
|
873 |
+
)
|
874 |
+
|
875 |
+
hidden_states = transformer_outputs[0]
|
876 |
+
logits = self.classifier(hidden_states)
|
877 |
+
in_logits = None
|
878 |
+
if self.config.pad_token_id is None:
|
879 |
+
sequence_lengths = -1
|
880 |
+
else:
|
881 |
+
if input_ids is not None:
|
882 |
+
sequence_lengths = (
|
883 |
+
tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)
|
884 |
+
- 1
|
885 |
+
)
|
886 |
+
sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1)
|
887 |
+
in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
|
888 |
+
else:
|
889 |
+
sequence_lengths = -1
|
890 |
+
logger.warning(
|
891 |
+
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
|
892 |
+
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
|
893 |
+
)
|
894 |
+
loss = None
|
895 |
+
|
896 |
+
if labels is not None:
|
897 |
+
if input_ids is not None:
|
898 |
+
batch_size, sequence_length = shape_list(input_ids)[:2]
|
899 |
+
else:
|
900 |
+
batch_size, sequence_length = shape_list(inputs_embeds)[:2]
|
901 |
+
if self.config.pad_token_id is None and batch_size != 1:
|
902 |
+
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
|
903 |
+
|
904 |
+
if not tf.is_tensor(sequence_lengths):
|
905 |
+
in_logits = logits[0:batch_size, sequence_lengths]
|
906 |
+
|
907 |
+
loss = self.hf_compute_loss(tf.reshape(labels, [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels]))
|
908 |
+
|
909 |
+
pooled_logits = in_logits if in_logits is not None else logits
|
910 |
+
|
911 |
+
if not return_dict:
|
912 |
+
output = (pooled_logits,) + transformer_outputs[1:]
|
913 |
+
return ((loss,) + output) if loss is not None else output
|
914 |
+
|
915 |
+
return TFSequenceClassifierOutput(
|
916 |
+
loss=loss,
|
917 |
+
logits=pooled_logits,
|
918 |
+
hidden_states=transformer_outputs.hidden_states,
|
919 |
+
attentions=transformer_outputs.attentions,
|
920 |
+
)
|
921 |
+
|
922 |
+
def build(self, input_shape=None):
|
923 |
+
if self.built:
|
924 |
+
return
|
925 |
+
self.built = True
|
926 |
+
if getattr(self, "classifier", None) is not None:
|
927 |
+
with tf.name_scope(self.classifier.name):
|
928 |
+
self.classifier.build([None, None, self.config.n_embd])
|
929 |
+
if getattr(self, "transformer", None) is not None:
|
930 |
+
with tf.name_scope(self.transformer.name):
|
931 |
+
self.transformer.build(None)
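The classification head above pools one logit vector per example with `tf.gather(..., batch_dims=1)`, indexed by the last non-padding position. A toy sketch of that gather:

```python
import tensorflow as tf

logits = tf.reshape(tf.range(2 * 3 * 4, dtype=tf.float32), (2, 3, 4))  # (batch, seq, num_labels)
sequence_lengths = tf.constant([2, 0])  # assumed last real-token index for each example
pooled = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
print(pooled.shape)  # (2, 4): one row of label logits per example
```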
|
llmeval-env/lib/python3.10/site-packages/transformers/models/ctrl/tokenization_ctrl.py
ADDED
@@ -0,0 +1,249 @@
# coding=utf-8
# Copyright 2018 Salesforce and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for Salesforce CTRL."""


import json
import os
from typing import Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}
|
34 |
+
|
35 |
+
|
36 |
+
CONTROL_CODES = {
|
37 |
+
"Pregnancy": 168629,
|
38 |
+
"Christianity": 7675,
|
39 |
+
"Explain": 106423,
|
40 |
+
"Fitness": 63440,
|
41 |
+
"Saving": 63163,
|
42 |
+
"Ask": 27171,
|
43 |
+
"Ass": 95985,
|
44 |
+
"Joke": 163509,
|
45 |
+
"Questions": 45622,
|
46 |
+
"Thoughts": 49605,
|
47 |
+
"Retail": 52342,
|
48 |
+
"Feminism": 164338,
|
49 |
+
"Writing": 11992,
|
50 |
+
"Atheism": 192263,
|
51 |
+
"Netflix": 48616,
|
52 |
+
"Computing": 39639,
|
53 |
+
"Opinion": 43213,
|
54 |
+
"Alone": 44967,
|
55 |
+
"Funny": 58917,
|
56 |
+
"Gaming": 40358,
|
57 |
+
"Human": 4088,
|
58 |
+
"India": 1331,
|
59 |
+
"Joker": 77138,
|
60 |
+
"Diet": 36206,
|
61 |
+
"Legal": 11859,
|
62 |
+
"Norman": 4939,
|
63 |
+
"Tip": 72689,
|
64 |
+
"Weight": 52343,
|
65 |
+
"Movies": 46273,
|
66 |
+
"Running": 23425,
|
67 |
+
"Science": 2090,
|
68 |
+
"Horror": 37793,
|
69 |
+
"Confession": 60572,
|
70 |
+
"Finance": 12250,
|
71 |
+
"Politics": 16360,
|
72 |
+
"Scary": 191985,
|
73 |
+
"Support": 12654,
|
74 |
+
"Technologies": 32516,
|
75 |
+
"Teenage": 66160,
|
76 |
+
"Event": 32769,
|
77 |
+
"Learned": 67460,
|
78 |
+
"Notion": 182770,
|
79 |
+
"Wikipedia": 37583,
|
80 |
+
"Books": 6665,
|
81 |
+
"Extract": 76050,
|
82 |
+
"Confessions": 102701,
|
83 |
+
"Conspiracy": 75932,
|
84 |
+
"Links": 63674,
|
85 |
+
"Narcissus": 150425,
|
86 |
+
"Relationship": 54766,
|
87 |
+
"Relationships": 134796,
|
88 |
+
"Reviews": 41671,
|
89 |
+
"News": 4256,
|
90 |
+
"Translation": 26820,
|
91 |
+
"multilingual": 128406,
|
92 |
+
}
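These ids are ordinary vocabulary entries, so a prompt that starts with a control code encodes that code as its first token. A sketch, assuming the public `Salesforce/ctrl` checkpoint whose vocabulary maps the codes to the ids above:

```python
from transformers import CTRLTokenizer

tokenizer = CTRLTokenizer.from_pretrained("Salesforce/ctrl")
ids = tokenizer("Opinion My dog is cute").input_ids
print(ids[0] == tokenizer.control_codes["Opinion"])  # expected True, i.e. id 43213
```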
|
93 |
+
|
94 |
+
|
95 |
+
def get_pairs(word):
|
96 |
+
"""
|
97 |
+
Return set of symbol pairs in a word.
|
98 |
+
|
99 |
+
Word is represented as tuple of symbols (symbols being variable-length strings).
|
100 |
+
"""
|
101 |
+
pairs = set()
|
102 |
+
prev_char = word[0]
|
103 |
+
for char in word[1:]:
|
104 |
+
pairs.add((prev_char, char))
|
105 |
+
prev_char = char
|
106 |
+
|
107 |
+
pairs = set(pairs)
|
108 |
+
return pairs
|
109 |
+
|
110 |
+
|
111 |
+
class CTRLTokenizer(PreTrainedTokenizer):
|
112 |
+
"""
|
113 |
+
Construct a CTRL tokenizer. Based on Byte-Pair-Encoding.
|
114 |
+
|
115 |
+
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
|
116 |
+
this superclass for more information regarding those methods.
|
117 |
+
|
118 |
+
Args:
|
119 |
+
vocab_file (`str`):
|
120 |
+
Path to the vocabulary file.
|
121 |
+
merges_file (`str`):
|
122 |
+
Path to the merges file.
|
123 |
+
unk_token (`str`, *optional*, defaults to `"<unk>"`):
|
124 |
+
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
|
125 |
+
token instead.
|
126 |
+
"""
|
127 |
+
|
128 |
+
vocab_files_names = VOCAB_FILES_NAMES
|
129 |
+
control_codes = CONTROL_CODES
|
130 |
+
|
131 |
+
def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
|
132 |
+
with open(vocab_file, encoding="utf-8") as vocab_handle:
|
133 |
+
self.encoder = json.load(vocab_handle)
|
134 |
+
self.decoder = {v: k for k, v in self.encoder.items()}
|
135 |
+
with open(merges_file, encoding="utf-8") as merges_handle:
|
136 |
+
merges = merges_handle.read().split("\n")[1:-1]
|
137 |
+
merges = [tuple(merge.split()) for merge in merges]
|
138 |
+
self.bpe_ranks = dict(zip(merges, range(len(merges))))
|
139 |
+
self.cache = {}
|
140 |
+
super().__init__(unk_token=unk_token, **kwargs)
|
141 |
+
|
142 |
+
@property
|
143 |
+
def vocab_size(self):
|
144 |
+
return len(self.encoder)
|
145 |
+
|
146 |
+
def get_vocab(self):
|
147 |
+
return dict(self.encoder, **self.added_tokens_encoder)
|
148 |
+
|
149 |
+
def bpe(self, token):
|
150 |
+
if token in self.cache:
|
151 |
+
return self.cache[token]
|
152 |
+
word = tuple(token)
|
153 |
+
word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
|
154 |
+
pairs = get_pairs(word)
|
155 |
+
|
156 |
+
if not pairs:
|
157 |
+
return token
|
158 |
+
|
159 |
+
while True:
|
160 |
+
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
|
161 |
+
if bigram not in self.bpe_ranks:
|
162 |
+
break
|
163 |
+
first, second = bigram
|
164 |
+
new_word = []
|
165 |
+
i = 0
|
166 |
+
while i < len(word):
|
167 |
+
try:
|
168 |
+
j = word.index(first, i)
|
169 |
+
except ValueError:
|
170 |
+
new_word.extend(word[i:])
|
171 |
+
break
|
172 |
+
else:
|
173 |
+
new_word.extend(word[i:j])
|
174 |
+
i = j
|
175 |
+
|
176 |
+
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
|
177 |
+
new_word.append(first + second)
|
178 |
+
i += 2
|
179 |
+
else:
|
180 |
+
new_word.append(word[i])
|
181 |
+
i += 1
|
182 |
+
new_word = tuple(new_word)
|
183 |
+
word = new_word
|
184 |
+
if len(word) == 1:
|
185 |
+
break
|
186 |
+
else:
|
187 |
+
pairs = get_pairs(word)
|
188 |
+
word = "@@ ".join(word)
|
189 |
+
word = word[:-4]
|
190 |
+
self.cache[token] = word
|
191 |
+
return word
|
192 |
+
|
193 |
+
def _tokenize(self, text):
|
194 |
+
"""Tokenize a string."""
|
195 |
+
split_tokens = []
|
196 |
+
|
197 |
+
words = re.findall(r"\S+\n?", text)
|
198 |
+
|
199 |
+
for token in words:
|
200 |
+
split_tokens.extend(list(self.bpe(token).split(" ")))
|
201 |
+
return split_tokens
|
202 |
+
|
203 |
+
def _convert_token_to_id(self, token):
|
204 |
+
"""Converts a token (str) in an id using the vocab."""
|
205 |
+
return self.encoder.get(token, self.encoder.get(self.unk_token))
|
206 |
+
|
207 |
+
def _convert_id_to_token(self, index):
|
208 |
+
"""Converts an index (integer) in a token (str) using the vocab."""
|
209 |
+
return self.decoder.get(index, self.unk_token)
|
210 |
+
|
211 |
+
def convert_tokens_to_string(self, tokens):
|
212 |
+
"""Converts a sequence of tokens (string) in a single string."""
|
213 |
+
out_string = " ".join(tokens).replace("@@ ", "").strip()
|
214 |
+
return out_string
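Sub-word pieces carry a trailing `@@` continuation marker (added by `bpe` above), and `convert_tokens_to_string` removes it again when joining pieces back into text. A minimal round-trip sketch with made-up pieces:

```python
tokens = ["Opin@@", "ion", "My", "dog", "is", "cute"]  # illustrative pieces, not real tokenizer output
text = " ".join(tokens).replace("@@ ", "").strip()
print(text)  # "Opinion My dog is cute"
```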
|
215 |
+
|
216 |
+
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
|
217 |
+
if not os.path.isdir(save_directory):
|
218 |
+
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
|
219 |
+
return
|
220 |
+
vocab_file = os.path.join(
|
221 |
+
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
|
222 |
+
)
|
223 |
+
merge_file = os.path.join(
|
224 |
+
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
|
225 |
+
)
|
226 |
+
|
227 |
+
with open(vocab_file, "w", encoding="utf-8") as f:
|
228 |
+
f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
|
229 |
+
|
230 |
+
index = 0
|
231 |
+
with open(merge_file, "w", encoding="utf-8") as writer:
|
232 |
+
writer.write("#version: 0.2\n")
|
233 |
+
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
|
234 |
+
if index != token_index:
|
235 |
+
logger.warning(
|
236 |
+
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
|
237 |
+
" Please check that the tokenizer is not corrupted!"
|
238 |
+
)
|
239 |
+
index = token_index
|
240 |
+
writer.write(" ".join(bpe_tokens) + "\n")
|
241 |
+
index += 1
|
242 |
+
|
243 |
+
return vocab_file, merge_file
|
244 |
+
|
245 |
+
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
|
246 |
+
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
|
247 |
+
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
|
248 |
+
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
|
249 |
+
# return ''.join(tokens_generated_so_far)
|
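For orientation only, a minimal sketch (not part of the file above) of how the "@@ " continuation marker round-trips between `bpe()`/`_tokenize` and `convert_tokens_to_string`; the token list is made up for illustration, not taken from the real CTRL vocabulary.

# Illustrative only: hypothetical BPE pieces, where a trailing "@@" marks a
# non-final piece of a word, as produced by bpe() above.
tokens = ["Mun@@", "ich", "is", "beauti@@", "ful"]

# Mirrors convert_tokens_to_string(): join on spaces, then drop the markers.
out_string = " ".join(tokens).replace("@@ ", "").strip()
print(out_string)  # Munich is beautiful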
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (203 Bytes)

llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/__pycache__/_archive_maps.cpython-310.pyc
ADDED
Binary file (87.2 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/bort/__init__.py
ADDED
File without changes

llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/bort/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (208 Bytes)

llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/bort/__pycache__/convert_bort_original_gluonnlp_checkpoint_to_pytorch.cpython-310.pyc
ADDED
Binary file (6.55 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/bort/convert_bort_original_gluonnlp_checkpoint_to_pytorch.py
ADDED
@@ -0,0 +1,319 @@
# coding=utf-8
# Copyright 2020, The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Bort checkpoint."""


import argparse
import os

import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn

from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging


if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"


def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to our BERT structure-
    """

    # Original Bort configuration
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )

        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("FacebookAI/roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
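As a usage note, the conversion function above can also be called directly from Python instead of via the CLI; a minimal sketch follows. The checkpoint and output paths are placeholders, and the import-time checks in the script still require the pinned gluonnlp==0.8.3 and mxnet==1.5.0.

# Hypothetical paths for illustration; both arguments are required by the script above.
from transformers.models.deprecated.bort.convert_bort_original_gluonnlp_checkpoint_to_pytorch import (
    convert_bort_checkpoint_to_pytorch,
)

convert_bort_checkpoint_to_pytorch(
    bort_checkpoint_path="/path/to/bort.params",        # the official Bort params file
    pytorch_dump_folder_path="/path/to/output_dir",     # where the PyTorch model is saved
)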
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__init__.py
ADDED
@@ -0,0 +1,95 @@
# Copyright 2023 EleutherAI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ....utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_open_llama": ["OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "OpenLlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_open_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_open_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_open_llama"] = [
        "OpenLlamaForCausalLM",
        "OpenLlamaModel",
        "OpenLlamaPreTrainedModel",
        "OpenLlamaForSequenceClassification",
    ]


if TYPE_CHECKING:
    from .configuration_open_llama import OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenLlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from transformers import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from transformers import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_open_llama import (
            OpenLlamaForCausalLM,
            OpenLlamaForSequenceClassification,
            OpenLlamaModel,
            OpenLlamaPreTrainedModel,
        )


else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
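A brief sketch of what the `_LazyModule` pattern above means for callers, assuming this installed transformers version: importing a name listed in `_import_structure` only loads the submodule that defines it on first access, so the optional torch-backed modeling module is not imported until it is actually needed.

# Resolving OpenLlamaConfig triggers a lazy load of configuration_open_llama only;
# modeling_open_llama stays unloaded unless one of its classes is requested.
from transformers.models.deprecated.open_llama import OpenLlamaConfig

config = OpenLlamaConfig()
print(config.model_type)  # open-llama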
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.43 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/configuration_open_llama.cpython-310.pyc
ADDED
Binary file (6.28 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/modeling_open_llama.cpython-310.pyc
ADDED
Binary file (31.4 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/configuration_open_llama.py
ADDED
@@ -0,0 +1,170 @@
# coding=utf-8
# Copyright 2023 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Open-Llama model configuration"""

from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)


from .._archive_maps import OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class OpenLlamaConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`OpenLlamaModel`]. It is used to instantiate an
    Open-Llama model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the
    [s-JoL/Open-Llama-V1](https://huggingface.co/s-JoL/Open-Llama-V1).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the Open-Llama model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`OpenLlamaModel`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings(`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
            these scaling strategies behave:
            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
            experimental feature, subject to breaking API changes in future versions.

    Example:

    ```python
    >>> from transformers import OpenLlamaModel, OpenLlamaConfig

    >>> # Initializing a Open-Llama open_llama-7b style configuration
    >>> configuration = OpenLlamaConfig()

    >>> # Initializing a model from the open_llama-7b style configuration
    >>> model = OpenLlamaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_theta=10000.0,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
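For illustration, a small sketch of the `rope_scaling` contract enforced by `_rope_scaling_validation` above; the factor values are chosen arbitrarily.

from transformers import OpenLlamaConfig

# Valid: exactly two fields, a known type ("linear" or "dynamic"), and a float factor > 1.
config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})

# Invalid: a factor <= 1 triggers the ValueError raised at the end of the file above.
try:
    OpenLlamaConfig(rope_scaling={"type": "dynamic", "factor": 1.0})
except ValueError as err:
    print(err)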
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/modeling_open_llama.py
ADDED
@@ -0,0 +1,969 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2023 EleutherAI and the HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
|
5 |
+
# and OPT implementations in this library. It has been modified from its
|
6 |
+
# original forms to accommodate minor architectural differences compared
|
7 |
+
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
|
8 |
+
#
|
9 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
10 |
+
# you may not use this file except in compliance with the License.
|
11 |
+
# You may obtain a copy of the License at
|
12 |
+
#
|
13 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
14 |
+
#
|
15 |
+
# Unless required by applicable law or agreed to in writing, software
|
16 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
17 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
18 |
+
# See the License for the specific language governing permissions and
|
19 |
+
# limitations under the License.
|
20 |
+
""" PyTorch Open-Llama model."""
|
21 |
+
import math
|
22 |
+
from typing import List, Optional, Tuple, Union
|
23 |
+
|
24 |
+
import torch
|
25 |
+
import torch.utils.checkpoint
|
26 |
+
from torch import nn
|
27 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
28 |
+
|
29 |
+
from ....activations import ACT2FN
|
30 |
+
from ....modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
|
31 |
+
from ....modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
|
32 |
+
from ....modeling_utils import PreTrainedModel
|
33 |
+
from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
|
34 |
+
from .configuration_open_llama import OpenLlamaConfig
|
35 |
+
|
36 |
+
|
37 |
+
logger = logging.get_logger(__name__)
|
38 |
+
|
39 |
+
try:
|
40 |
+
from xformers import ops as xops
|
41 |
+
except ImportError:
|
42 |
+
xops = None
|
43 |
+
|
44 |
+
|
45 |
+
_CONFIG_FOR_DOC = "OpenLlamaConfig"
|
46 |
+
|
47 |
+
|
48 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->OpenLlama
|
49 |
+
class OpenLlamaRMSNorm(nn.Module):
|
50 |
+
def __init__(self, hidden_size, eps=1e-6):
|
51 |
+
"""
|
52 |
+
OpenLlamaRMSNorm is equivalent to T5LayerNorm
|
53 |
+
"""
|
54 |
+
super().__init__()
|
55 |
+
self.weight = nn.Parameter(torch.ones(hidden_size))
|
56 |
+
self.variance_epsilon = eps
|
57 |
+
|
58 |
+
def forward(self, hidden_states):
|
59 |
+
input_dtype = hidden_states.dtype
|
60 |
+
hidden_states = hidden_states.to(torch.float32)
|
61 |
+
variance = hidden_states.pow(2).mean(-1, keepdim=True)
|
62 |
+
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
63 |
+
return self.weight * hidden_states.to(input_dtype)
|
64 |
+
|
65 |
+
|
66 |
+
# Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->OpenLlama
|
67 |
+
class OpenLlamaRotaryEmbedding(nn.Module):
|
68 |
+
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
|
69 |
+
super().__init__()
|
70 |
+
|
71 |
+
self.dim = dim
|
72 |
+
self.max_position_embeddings = max_position_embeddings
|
73 |
+
self.base = base
|
74 |
+
inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
|
75 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
76 |
+
|
77 |
+
# Build here to make `torch.jit.trace` work.
|
78 |
+
self._set_cos_sin_cache(
|
79 |
+
seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
|
80 |
+
)
|
81 |
+
|
82 |
+
def _set_cos_sin_cache(self, seq_len, device, dtype):
|
83 |
+
self.max_seq_len_cached = seq_len
|
84 |
+
t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
|
85 |
+
|
86 |
+
freqs = torch.outer(t, self.inv_freq)
|
87 |
+
# Different from paper, but it uses a different permutation in order to obtain the same calculation
|
88 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
89 |
+
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
|
90 |
+
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
|
91 |
+
|
92 |
+
def forward(self, x, seq_len=None):
|
93 |
+
# x: [bs, num_attention_heads, seq_len, head_size]
|
94 |
+
if seq_len > self.max_seq_len_cached:
|
95 |
+
self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
|
96 |
+
|
97 |
+
return (
|
98 |
+
self.cos_cached[:seq_len].to(dtype=x.dtype),
|
99 |
+
self.sin_cached[:seq_len].to(dtype=x.dtype),
|
100 |
+
)
|
101 |
+
|
102 |
+
|
103 |
+
# Copied from transformers.models.falcon.modeling_falcon.FalconLinearScalingRotaryEmbedding with Falcon->OpenLlama
|
104 |
+
class OpenLlamaLinearScalingRotaryEmbedding(OpenLlamaRotaryEmbedding):
|
105 |
+
"""OpenLlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
|
106 |
+
|
107 |
+
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
|
108 |
+
self.scaling_factor = scaling_factor
|
109 |
+
super().__init__(dim, max_position_embeddings, base, device)
|
110 |
+
|
111 |
+
def _set_cos_sin_cache(self, seq_len, device, dtype):
|
112 |
+
self.max_seq_len_cached = seq_len
|
113 |
+
t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
|
114 |
+
t = t / self.scaling_factor
|
115 |
+
|
116 |
+
freqs = torch.outer(t, self.inv_freq)
|
117 |
+
# Different from paper, but it uses a different permutation in order to obtain the same calculation
|
118 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
119 |
+
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
|
120 |
+
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
|
121 |
+
|
122 |
+
|
123 |
+
# Copied from transformers.models.falcon.modeling_falcon.FalconDynamicNTKScalingRotaryEmbedding with Falcon->OpenLlama
|
124 |
+
class OpenLlamaDynamicNTKScalingRotaryEmbedding(OpenLlamaRotaryEmbedding):
|
125 |
+
"""OpenLlamaRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
|
126 |
+
|
127 |
+
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
|
128 |
+
self.scaling_factor = scaling_factor
|
129 |
+
super().__init__(dim, max_position_embeddings, base, device)
|
130 |
+
|
131 |
+
def _set_cos_sin_cache(self, seq_len, device, dtype):
|
132 |
+
self.max_seq_len_cached = seq_len
|
133 |
+
|
134 |
+
if seq_len > self.max_position_embeddings:
|
135 |
+
base = self.base * (
|
136 |
+
(self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
|
137 |
+
) ** (self.dim / (self.dim - 2))
|
138 |
+
inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
|
139 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
140 |
+
|
141 |
+
t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
|
142 |
+
|
143 |
+
freqs = torch.outer(t, self.inv_freq)
|
144 |
+
# Different from paper, but it uses a different permutation in order to obtain the same calculation
|
145 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
146 |
+
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
|
147 |
+
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
|
148 |
+
|
149 |
+
|
150 |
+
def rotate_half(x):
|
151 |
+
"""Rotates half the hidden dims of the input."""
|
152 |
+
x1 = x[..., : x.shape[-1] // 2]
|
153 |
+
x2 = x[..., x.shape[-1] // 2 :]
|
154 |
+
return torch.cat((-x2, x1), dim=-1)
|
155 |
+
|
156 |
+
|
157 |
+
# Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
|
158 |
+
def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
|
159 |
+
"""Applies Rotary Position Embedding to the query and key tensors.
|
160 |
+
|
161 |
+
Args:
|
162 |
+
q (`torch.Tensor`): The query tensor.
|
163 |
+
k (`torch.Tensor`): The key tensor.
|
164 |
+
cos (`torch.Tensor`): The cosine part of the rotary embedding.
|
165 |
+
sin (`torch.Tensor`): The sine part of the rotary embedding.
|
166 |
+
position_ids (`torch.Tensor`):
|
167 |
+
The position indices of the tokens corresponding to the query and key tensors. For example, this can be
|
168 |
+
used to pass offsetted position ids when working with a KV-cache.
|
169 |
+
unsqueeze_dim (`int`, *optional*, defaults to 1):
|
170 |
+
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
|
171 |
+
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
|
172 |
+
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
|
173 |
+
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
|
174 |
+
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
|
175 |
+
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
|
176 |
+
Returns:
|
177 |
+
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
|
178 |
+
"""
|
179 |
+
cos = cos[position_ids].unsqueeze(unsqueeze_dim)
|
180 |
+
sin = sin[position_ids].unsqueeze(unsqueeze_dim)
|
181 |
+
q_embed = (q * cos) + (rotate_half(q) * sin)
|
182 |
+
k_embed = (k * cos) + (rotate_half(k) * sin)
|
183 |
+
return q_embed, k_embed
|
184 |
+
|
185 |
+
|
186 |
+
class OpenLlamaMLP(nn.Module):
|
187 |
+
def __init__(
|
188 |
+
self,
|
189 |
+
hidden_size: int,
|
190 |
+
intermediate_size: int,
|
191 |
+
hidden_act: str,
|
192 |
+
dropout_prob: float,
|
193 |
+
):
|
194 |
+
super().__init__()
|
195 |
+
self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
|
196 |
+
self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
|
197 |
+
self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
|
198 |
+
self.act_fn = ACT2FN[hidden_act]
|
199 |
+
self.dropout = nn.Dropout(dropout_prob)
|
200 |
+
|
201 |
+
def forward(self, x):
|
202 |
+
out = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
|
203 |
+
return self.dropout(out)
|
204 |
+
|
205 |
+
|
206 |
+
class OpenLlamaAttention(nn.Module):
|
207 |
+
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
208 |
+
|
209 |
+
def __init__(self, config: OpenLlamaConfig):
|
210 |
+
super().__init__()
|
211 |
+
self.config = config
|
212 |
+
self.hidden_size = config.hidden_size
|
213 |
+
self.num_heads = config.num_attention_heads
|
214 |
+
self.head_dim = self.hidden_size // self.num_heads
|
215 |
+
self.max_position_embeddings = config.max_position_embeddings
|
216 |
+
self.dropout_prob = config.attention_dropout_prob
|
217 |
+
self.rope_theta = config.rope_theta
|
218 |
+
|
219 |
+
if (self.head_dim * self.num_heads) != self.hidden_size:
|
220 |
+
raise ValueError(
|
221 |
+
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
|
222 |
+
f" and `num_heads`: {self.num_heads})."
|
223 |
+
)
|
224 |
+
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
|
225 |
+
self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
|
226 |
+
self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
|
227 |
+
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
|
228 |
+
self._init_rope()
|
229 |
+
|
230 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaAttention._init_rope with Llama->OpenLlama
|
231 |
+
def _init_rope(self):
|
232 |
+
if self.config.rope_scaling is None:
|
233 |
+
self.rotary_emb = OpenLlamaRotaryEmbedding(
|
234 |
+
self.head_dim,
|
235 |
+
max_position_embeddings=self.max_position_embeddings,
|
236 |
+
base=self.rope_theta,
|
237 |
+
)
|
238 |
+
else:
|
239 |
+
scaling_type = self.config.rope_scaling["type"]
|
240 |
+
scaling_factor = self.config.rope_scaling["factor"]
|
241 |
+
if scaling_type == "linear":
|
242 |
+
self.rotary_emb = OpenLlamaLinearScalingRotaryEmbedding(
|
243 |
+
self.head_dim,
|
244 |
+
max_position_embeddings=self.max_position_embeddings,
|
245 |
+
scaling_factor=scaling_factor,
|
246 |
+
base=self.rope_theta,
|
247 |
+
)
|
248 |
+
elif scaling_type == "dynamic":
|
249 |
+
self.rotary_emb = OpenLlamaDynamicNTKScalingRotaryEmbedding(
|
250 |
+
self.head_dim,
|
251 |
+
max_position_embeddings=self.max_position_embeddings,
|
252 |
+
scaling_factor=scaling_factor,
|
253 |
+
base=self.rope_theta,
|
254 |
+
)
|
255 |
+
else:
|
256 |
+
raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
|
257 |
+
|
258 |
+
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
|
259 |
+
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
|
260 |
+
|
261 |
+
def forward(
|
262 |
+
self,
|
263 |
+
hidden_states: torch.Tensor,
|
264 |
+
attention_mask: Optional[torch.Tensor] = None,
|
265 |
+
position_ids: Optional[torch.LongTensor] = None,
|
266 |
+
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
267 |
+
output_attentions: bool = False,
|
268 |
+
use_cache: bool = False,
|
269 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
270 |
+
bsz, q_len, _ = hidden_states.size()
|
271 |
+
|
272 |
+
query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
273 |
+
key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
274 |
+
value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
275 |
+
|
276 |
+
kv_seq_len = key_states.shape[-2]
|
277 |
+
if past_key_value is not None:
|
278 |
+
kv_seq_len += past_key_value[0].shape[-2]
|
279 |
+
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
|
280 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
|
281 |
+
# [bsz, nh, t, hd]
|
282 |
+
|
283 |
+
if past_key_value is not None:
|
284 |
+
# reuse k, v, self_attention
|
285 |
+
key_states = torch.cat([past_key_value[0], key_states], dim=2)
|
286 |
+
value_states = torch.cat([past_key_value[1], value_states], dim=2)
|
287 |
+
|
288 |
+
past_key_value = (key_states, value_states) if use_cache else None
|
289 |
+
|
290 |
+
if self.config.use_memory_efficient_attention and xops is not None and self.training:
|
291 |
+
attn_weights = None
|
292 |
+
query_states = query_states.transpose(1, 2)
|
293 |
+
key_states = key_states.transpose(1, 2)
|
294 |
+
value_states = value_states.transpose(1, 2)
|
295 |
+
attn_output = xops.memory_efficient_attention(
|
296 |
+
query_states, key_states, value_states, attn_bias=xops.LowerTriangularMask(), p=self.dropout_prob
|
297 |
+
)
|
298 |
+
else:
|
299 |
+
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
|
300 |
+
|
301 |
+
if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
|
302 |
+
raise ValueError(
|
303 |
+
f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is"
|
304 |
+
f" {attn_weights.size()}"
|
305 |
+
)
|
306 |
+
|
307 |
+
if attention_mask is not None:
|
308 |
+
if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
|
309 |
+
raise ValueError(
|
310 |
+
f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
|
311 |
+
)
|
312 |
+
attn_weights = attn_weights + attention_mask
|
313 |
+
attn_weights = torch.max(
|
314 |
+
attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min, device=attn_weights.device)
|
315 |
+
)
|
316 |
+
|
317 |
+
# upcast attention to fp32
|
318 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
|
319 |
+
attn_output = torch.matmul(attn_weights, value_states)
|
320 |
+
|
321 |
+
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
|
322 |
+
raise ValueError(
|
323 |
+
f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
|
324 |
+
f" {attn_output.size()}"
|
325 |
+
)
|
326 |
+
|
327 |
+
attn_output = attn_output.transpose(1, 2)
|
328 |
+
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
|
329 |
+
|
330 |
+
attn_output = self.o_proj(attn_output)
|
331 |
+
|
332 |
+
if not output_attentions:
|
333 |
+
attn_weights = None
|
334 |
+
|
335 |
+
return attn_output, attn_weights, past_key_value
|
336 |
+
|
337 |
+
|
338 |
+
class OpenLlamaDecoderLayer(nn.Module):
|
339 |
+
def __init__(self, config: OpenLlamaConfig):
|
340 |
+
super().__init__()
|
341 |
+
self.hidden_size = config.hidden_size
|
342 |
+
self.self_attn = OpenLlamaAttention(config=config)
|
343 |
+
self.mlp = OpenLlamaMLP(
|
344 |
+
hidden_size=self.hidden_size,
|
345 |
+
intermediate_size=config.intermediate_size,
|
346 |
+
hidden_act=config.hidden_act,
|
347 |
+
dropout_prob=config.hidden_dropout_prob,
|
348 |
+
)
|
349 |
+
self.input_layernorm = OpenLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
350 |
+
self.post_attention_layernorm = OpenLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
351 |
+
|
352 |
+
def forward(
|
353 |
+
self,
|
354 |
+
hidden_states: torch.Tensor,
|
355 |
+
attention_mask: Optional[torch.Tensor] = None,
|
356 |
+
position_ids: Optional[torch.LongTensor] = None,
|
357 |
+
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
358 |
+
output_attentions: Optional[bool] = False,
|
359 |
+
use_cache: Optional[bool] = False,
|
360 |
+
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
361 |
+
"""
|
362 |
+
Args:
|
363 |
+
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
|
364 |
+
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
|
365 |
+
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
|
366 |
+
output_attentions (`bool`, *optional*):
|
367 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
368 |
+
returned tensors for more detail.
|
369 |
+
use_cache (`bool`, *optional*):
|
370 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
|
371 |
+
(see `past_key_values`).
|
372 |
+
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
|
373 |
+
"""
|
374 |
+
|
375 |
+
residual = hidden_states
|
376 |
+
|
377 |
+
hidden_states = self.input_layernorm(hidden_states)
|
378 |
+
|
379 |
+
# Self Attention
|
380 |
+
hidden_states, self_attn_weights, present_key_value = self.self_attn(
|
381 |
+
hidden_states=hidden_states,
|
382 |
+
attention_mask=attention_mask,
|
383 |
+
position_ids=position_ids,
|
384 |
+
past_key_value=past_key_value,
|
385 |
+
output_attentions=output_attentions,
|
386 |
+
use_cache=use_cache,
|
387 |
+
)
|
388 |
+
hidden_states = residual + hidden_states
|
389 |
+
|
390 |
+
# Fully Connected
|
391 |
+
residual = hidden_states
|
392 |
+
hidden_states = self.post_attention_layernorm(hidden_states)
|
393 |
+
hidden_states = self.mlp(hidden_states)
|
394 |
+
hidden_states = residual + hidden_states
|
395 |
+
|
396 |
+
outputs = (hidden_states,)
|
397 |
+
|
398 |
+
if output_attentions:
|
399 |
+
outputs += (self_attn_weights,)
|
400 |
+
|
401 |
+
if use_cache:
|
402 |
+
outputs += (present_key_value,)
|
403 |
+
|
404 |
+
return outputs
|
405 |
+
|
406 |
+
|
407 |
+
OPEN_LLAMA_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`OpenLlamaConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare Open-Llama Model outputting raw hidden-states without any specific head on top.",
    OPEN_LLAMA_START_DOCSTRING,
)
class OpenLlamaPreTrainedModel(PreTrainedModel):
    config_class = OpenLlamaConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["OpenLlamaDecoderLayer"]

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            if self.config.use_stable_embedding:
                torch.nn.init.xavier_normal_(module.weight.data)
            else:
                module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


OPEN_LLAMA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare Open-Llama Model outputting raw hidden-states without any specific head on top.",
    OPEN_LLAMA_START_DOCSTRING,
)
class OpenLlamaModel(OpenLlamaPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`OpenLlamaDecoderLayer`]

    Args:
        config: OpenLlamaConfig
    """

    def __init__(self, config: OpenLlamaConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        if config.use_stable_embedding:
            self.embed_layer_norm = nn.LayerNorm(config.hidden_size)
        else:
            self.embed_layer_norm = None
        self.layers = nn.ModuleList([OpenLlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.norm = OpenLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        seq_length_with_past = seq_length
        past_key_values_length = 0

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        if past_key_values is not None:
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0)

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
            if self.embed_layer_norm:
                inputs_embeds = self.embed_layer_norm(inputs_embeds)
        # embed positions
        if self.config.use_memory_efficient_attention and self.training:
            attention_mask = None
        elif attention_mask is None:
            attention_mask = torch.ones(
                (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
            )

        input_shape = (batch_size, seq_length)
        attention_mask = _prepare_4d_causal_attention_mask(
            attention_mask, input_shape, inputs_embeds, past_key_values_length
        )

        hidden_states = inputs_embeds

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    position_ids,
                    None,
                    output_attentions,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class OpenLlamaForCausalLM(OpenLlamaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.model = OpenLlamaModel(config)
        if config.shared_input_output_embedding:
            self.lm_head = None
        else:
            self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, OpenLlamaForCausalLM

        >>> model = OpenLlamaForCausalLM.from_pretrained("openlm-research/open_llama_7b")
        >>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        if self.config.shared_input_output_embedding:
            logits = torch.einsum(
                "blh,vh->blv", hidden_states.to(self.model.embed_tokens.weight.device), self.model.embed_tokens.weight
            )
        else:
            logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(logits.device)
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        if past_key_values is not None:
            past_length = past_key_values[0][0].shape[2]

            # Some generation methods already pass only the last input ID
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                # Default to old behavior: keep only final ID
                remove_prefix_length = input_ids.shape[1] - 1

            input_ids = input_ids[:, remove_prefix_length:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past


@add_start_docstrings(
    """
    The LLaMa Model transformer with a sequence classification head on top (linear layer).

    [`OpenLlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal
    models (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    OPEN_LLAMA_START_DOCSTRING,
)
class OpenLlamaForSequenceClassification(OpenLlamaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = OpenLlamaModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
                sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
                sequence_lengths = sequence_lengths % input_ids.shape[-1]
                sequence_lengths = sequence_lengths.to(logits.device)
            else:
                sequence_lengths = -1

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
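The sequence-classification head above pools one logit vector per row by locating the last non-padding token: an `argmax` over the padding mask finds the first pad position, subtracting one points at the last real token, and the modulo keeps fully unpadded rows (where `argmax` returns 0) pointing at the final position. A minimal sketch of that indexing recipe with toy ids (the pad id and values here are made up for illustration):

```python
import torch

pad_token_id = 0
input_ids = torch.tensor(
    [
        [5, 6, 7, 0, 0],  # 3 real tokens -> last real position is index 2
        [8, 9, 1, 2, 3],  # no padding    -> last position is index 4
    ]
)

# Same recipe as OpenLlamaForSequenceClassification.forward
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
print(sequence_lengths.tolist())  # [2, 4]
```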
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__init__.py
ADDED
@@ -0,0 +1,73 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_retribert": ["RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RetriBertConfig"],
    "tokenization_retribert": ["RetriBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_retribert_fast"] = ["RetriBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_retribert"] = [
        "RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RetriBertModel",
        "RetriBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig
    from .tokenization_retribert import RetriBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_retribert_fast import RetriBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_retribert import (
            RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RetriBertModel,
            RetriBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
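The `_LazyModule` indirection above keeps package import cheap: the concrete submodules only load when one of the exported names is first touched. A small sketch, assuming the deprecated subpackage shown in this diff is present in the installed `transformers`:

```python
# Touching a name triggers the lazy submodule import; nothing heavy happens before that.
from transformers.models.deprecated.retribert import RetriBertConfig, RetriBertTokenizer

print(RetriBertConfig.model_type)            # "retribert"
print(RetriBertTokenizer.vocab_files_names)  # {"vocab_file": "vocab.txt"}
```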
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.24 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/configuration_retribert.cpython-310.pyc
ADDED
Binary file (4.6 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/modeling_retribert.cpython-310.pyc
ADDED
Binary file (7.47 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert.cpython-310.pyc
ADDED
Binary file (17.2 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert_fast.cpython-310.pyc
ADDED
Binary file (7.01 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/configuration_retribert.py
ADDED
@@ -0,0 +1,107 @@
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" RetriBERT model configuration"""

from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

from .._archive_maps import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class RetriBertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`RetriBertModel`]. It is used to instantiate a
    RetriBertModel model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the RetriBERT
    [yjernite/retribert-base-uncased](https://huggingface.co/yjernite/retribert-base-uncased) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the RetriBERT model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`RetriBertModel`]
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the *token_type_ids* passed into [`BertModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        share_encoders (`bool`, *optional*, defaults to `True`):
            Whether or not to use the same Bert-type encoder for the queries and document
        projection_dim (`int`, *optional*, defaults to 128):
            Final dimension of the query and document representation after projection
    """

    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
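`RetriBertConfig` mirrors a standard BERT-style configuration, so overriding a field is enough to describe a differently sized retrieval encoder. Note that the `__init__` signature defaults `num_hidden_layers` to 8 even though the docstring above says 12. A sketch with illustrative (not recommended) values:

```python
from transformers.models.deprecated.retribert.configuration_retribert import RetriBertConfig

# Override only what differs from the defaults; everything else keeps the BERT-like values.
config = RetriBertConfig(projection_dim=256, share_encoders=False)
print(config.num_hidden_layers)  # 8 (the code default, despite the docstring's "12")
print(config.projection_dim)     # 256
```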
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/modeling_retribert.py
ADDED
@@ -0,0 +1,218 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
"""
|
16 |
+
RetriBERT model
|
17 |
+
"""
|
18 |
+
|
19 |
+
|
20 |
+
import math
|
21 |
+
from typing import Optional
|
22 |
+
|
23 |
+
import torch
|
24 |
+
import torch.utils.checkpoint as checkpoint
|
25 |
+
from torch import nn
|
26 |
+
|
27 |
+
from ....modeling_utils import PreTrainedModel
|
28 |
+
from ....utils import add_start_docstrings, logging
|
29 |
+
from ...bert.modeling_bert import BertModel
|
30 |
+
from .configuration_retribert import RetriBertConfig
|
31 |
+
|
32 |
+
|
33 |
+
logger = logging.get_logger(__name__)
|
34 |
+
|
35 |
+
|
36 |
+
from .._archive_maps import RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
|
37 |
+
|
38 |
+
|
39 |
+
# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
|
40 |
+
class RetriBertPreTrainedModel(PreTrainedModel):
|
41 |
+
"""
|
42 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
43 |
+
models.
|
44 |
+
"""
|
45 |
+
|
46 |
+
config_class = RetriBertConfig
|
47 |
+
load_tf_weights = None
|
48 |
+
base_model_prefix = "retribert"
|
49 |
+
|
50 |
+
def _init_weights(self, module):
|
51 |
+
"""Initialize the weights"""
|
52 |
+
if isinstance(module, nn.Linear):
|
53 |
+
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
54 |
+
if module.bias is not None:
|
55 |
+
module.bias.data.zero_()
|
56 |
+
elif isinstance(module, nn.Embedding):
|
57 |
+
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
58 |
+
if module.padding_idx is not None:
|
59 |
+
module.weight.data[module.padding_idx].zero_()
|
60 |
+
elif isinstance(module, nn.LayerNorm):
|
61 |
+
module.bias.data.zero_()
|
62 |
+
module.weight.data.fill_(1.0)
|
63 |
+
|
64 |
+
|
65 |
+
RETRIBERT_START_DOCSTRING = r"""
|
66 |
+
|
67 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
68 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
69 |
+
etc.)
|
70 |
+
|
71 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
72 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
73 |
+
and behavior.
|
74 |
+
|
75 |
+
Parameters:
|
76 |
+
config ([`RetriBertConfig`]): Model configuration class with all the parameters of the model.
|
77 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
78 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
79 |
+
"""
|
80 |
+
|
81 |
+
|
82 |
+
@add_start_docstrings(
|
83 |
+
"""Bert Based model to embed queries or document for document retrieval.""",
|
84 |
+
RETRIBERT_START_DOCSTRING,
|
85 |
+
)
|
86 |
+
class RetriBertModel(RetriBertPreTrainedModel):
|
87 |
+
def __init__(self, config: RetriBertConfig) -> None:
|
88 |
+
super().__init__(config)
|
89 |
+
self.projection_dim = config.projection_dim
|
90 |
+
|
91 |
+
self.bert_query = BertModel(config)
|
92 |
+
self.bert_doc = None if config.share_encoders else BertModel(config)
|
93 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
94 |
+
self.project_query = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
|
95 |
+
self.project_doc = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
|
96 |
+
|
97 |
+
self.ce_loss = nn.CrossEntropyLoss(reduction="mean")
|
98 |
+
|
99 |
+
# Initialize weights and apply final processing
|
100 |
+
self.post_init()
|
101 |
+
|
102 |
+
def embed_sentences_checkpointed(
|
103 |
+
self,
|
104 |
+
input_ids,
|
105 |
+
attention_mask,
|
106 |
+
sent_encoder,
|
107 |
+
checkpoint_batch_size=-1,
|
108 |
+
):
|
109 |
+
# reproduces BERT forward pass with checkpointing
|
110 |
+
if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size:
|
111 |
+
return sent_encoder(input_ids, attention_mask=attention_mask)[1]
|
112 |
+
else:
|
113 |
+
# prepare implicit variables
|
114 |
+
device = input_ids.device
|
115 |
+
input_shape = input_ids.size()
|
116 |
+
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
|
117 |
+
head_mask = [None] * sent_encoder.config.num_hidden_layers
|
118 |
+
extended_attention_mask: torch.Tensor = sent_encoder.get_extended_attention_mask(
|
119 |
+
attention_mask, input_shape
|
120 |
+
)
|
121 |
+
|
122 |
+
# define function for checkpointing
|
123 |
+
def partial_encode(*inputs):
|
124 |
+
encoder_outputs = sent_encoder.encoder(
|
125 |
+
inputs[0],
|
126 |
+
attention_mask=inputs[1],
|
127 |
+
head_mask=head_mask,
|
128 |
+
)
|
129 |
+
sequence_output = encoder_outputs[0]
|
130 |
+
pooled_output = sent_encoder.pooler(sequence_output)
|
131 |
+
return pooled_output
|
132 |
+
|
133 |
+
# run embedding layer on everything at once
|
134 |
+
embedding_output = sent_encoder.embeddings(
|
135 |
+
input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None
|
136 |
+
)
|
137 |
+
# run encoding and pooling on one mini-batch at a time
|
138 |
+
pooled_output_list = []
|
139 |
+
for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)):
|
140 |
+
b_embedding_output = embedding_output[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
|
141 |
+
b_attention_mask = extended_attention_mask[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
|
142 |
+
pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask)
|
143 |
+
pooled_output_list.append(pooled_output)
|
144 |
+
return torch.cat(pooled_output_list, dim=0)
|
145 |
+
|
146 |
+
def embed_questions(
|
147 |
+
self,
|
148 |
+
input_ids,
|
149 |
+
attention_mask=None,
|
150 |
+
checkpoint_batch_size=-1,
|
151 |
+
):
|
152 |
+
q_reps = self.embed_sentences_checkpointed(
|
153 |
+
input_ids,
|
154 |
+
attention_mask,
|
155 |
+
self.bert_query,
|
156 |
+
checkpoint_batch_size,
|
157 |
+
)
|
158 |
+
return self.project_query(q_reps)
|
159 |
+
|
160 |
+
def embed_answers(
|
161 |
+
self,
|
162 |
+
input_ids,
|
163 |
+
attention_mask=None,
|
164 |
+
checkpoint_batch_size=-1,
|
165 |
+
):
|
166 |
+
a_reps = self.embed_sentences_checkpointed(
|
167 |
+
input_ids,
|
168 |
+
attention_mask,
|
169 |
+
self.bert_query if self.bert_doc is None else self.bert_doc,
|
170 |
+
checkpoint_batch_size,
|
171 |
+
)
|
172 |
+
return self.project_doc(a_reps)
|
173 |
+
|
174 |
+
def forward(
|
175 |
+
self,
|
176 |
+
input_ids_query: torch.LongTensor,
|
177 |
+
attention_mask_query: Optional[torch.FloatTensor],
|
178 |
+
input_ids_doc: torch.LongTensor,
|
179 |
+
attention_mask_doc: Optional[torch.FloatTensor],
|
180 |
+
checkpoint_batch_size: int = -1,
|
181 |
+
) -> torch.FloatTensor:
|
182 |
+
r"""
|
183 |
+
Args:
|
184 |
+
input_ids_query (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
185 |
+
Indices of input sequence tokens in the vocabulary for the queries in a batch.
|
186 |
+
|
187 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
188 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
189 |
+
|
190 |
+
[What are input IDs?](../glossary#input-ids)
|
191 |
+
attention_mask_query (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
192 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
193 |
+
|
194 |
+
- 1 for tokens that are **not masked**,
|
195 |
+
- 0 for tokens that are **masked**.
|
196 |
+
|
197 |
+
[What are attention masks?](../glossary#attention-mask)
|
198 |
+
input_ids_doc (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
199 |
+
Indices of input sequence tokens in the vocabulary for the documents in a batch.
|
200 |
+
attention_mask_doc (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
201 |
+
Mask to avoid performing attention on documents padding token indices.
|
202 |
+
checkpoint_batch_size (`int`, *optional*, defaults to `-1`):
|
203 |
+
If greater than 0, uses gradient checkpointing to only compute sequence representation on
|
204 |
+
`checkpoint_batch_size` examples at a time on the GPU. All query representations are still compared to
|
205 |
+
all document representations in the batch.
|
206 |
+
|
207 |
+
Return:
|
208 |
+
`torch.FloatTensor``: The bidirectional cross-entropy loss obtained while trying to match each query to its
|
209 |
+
corresponding document and each document to its corresponding query in the batch
|
210 |
+
"""
|
211 |
+
device = input_ids_query.device
|
212 |
+
q_reps = self.embed_questions(input_ids_query, attention_mask_query, checkpoint_batch_size)
|
213 |
+
a_reps = self.embed_answers(input_ids_doc, attention_mask_doc, checkpoint_batch_size)
|
214 |
+
compare_scores = torch.mm(q_reps, a_reps.t())
|
215 |
+
loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device))
|
216 |
+
loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device))
|
217 |
+
loss = (loss_qa + loss_aq) / 2
|
218 |
+
return loss
|
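`RetriBertModel.forward` scores every query against every document in the batch and averages the cross-entropy in both directions, so each query is pushed toward its own document and vice versa. A standalone sketch of that bidirectional in-batch loss, with random embeddings standing in for the BERT encoders (shapes are illustrative):

```python
import torch
from torch import nn

batch_size, projection_dim = 4, 128
q_reps = torch.randn(batch_size, projection_dim)  # stands in for embed_questions(...)
a_reps = torch.randn(batch_size, projection_dim)  # stands in for embed_answers(...)

ce_loss = nn.CrossEntropyLoss(reduction="mean")
compare_scores = torch.mm(q_reps, a_reps.t())     # (batch, batch) similarity matrix
targets = torch.arange(batch_size)                # query i should match document i
loss_qa = ce_loss(compare_scores, targets)        # match each query to its document
loss_aq = ce_loss(compare_scores.t(), targets)    # and each document to its query
loss = (loss_qa + loss_aq) / 2
print(loss.item())
```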
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert.py
ADDED
@@ -0,0 +1,517 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2018 The HuggingFace Inc. team.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
"""Tokenization classes for RetriBERT."""
|
16 |
+
|
17 |
+
import collections
|
18 |
+
import os
|
19 |
+
import unicodedata
|
20 |
+
from typing import List, Optional, Tuple
|
21 |
+
|
22 |
+
from ....tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
|
23 |
+
from ....utils import logging
|
24 |
+
|
25 |
+
|
26 |
+
logger = logging.get_logger(__name__)
|
27 |
+
|
28 |
+
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
|
29 |
+
|
30 |
+
|
31 |
+
# Copied from transformers.models.bert.tokenization_bert.load_vocab
|
32 |
+
def load_vocab(vocab_file):
|
33 |
+
"""Loads a vocabulary file into a dictionary."""
|
34 |
+
vocab = collections.OrderedDict()
|
35 |
+
with open(vocab_file, "r", encoding="utf-8") as reader:
|
36 |
+
tokens = reader.readlines()
|
37 |
+
for index, token in enumerate(tokens):
|
38 |
+
token = token.rstrip("\n")
|
39 |
+
vocab[token] = index
|
40 |
+
return vocab
|
41 |
+
|
42 |
+
|
43 |
+
# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
|
44 |
+
def whitespace_tokenize(text):
|
45 |
+
"""Runs basic whitespace cleaning and splitting on a piece of text."""
|
46 |
+
text = text.strip()
|
47 |
+
if not text:
|
48 |
+
return []
|
49 |
+
tokens = text.split()
|
50 |
+
return tokens
|
51 |
+
|
52 |
+
|
53 |
+
class RetriBertTokenizer(PreTrainedTokenizer):
|
54 |
+
r"""
|
55 |
+
Constructs a RetriBERT tokenizer.
|
56 |
+
|
57 |
+
[`RetriBertTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting
|
58 |
+
and wordpiece.
|
59 |
+
|
60 |
+
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
|
61 |
+
to: this superclass for more information regarding those methods.
|
62 |
+
|
63 |
+
Args:
|
64 |
+
vocab_file (`str`):
|
65 |
+
File containing the vocabulary.
|
66 |
+
do_lower_case (`bool`, *optional*, defaults to `True`):
|
67 |
+
Whether or not to lowercase the input when tokenizing.
|
68 |
+
do_basic_tokenize (`bool`, *optional*, defaults to `True`):
|
69 |
+
Whether or not to do basic tokenization before WordPiece.
|
70 |
+
never_split (`Iterable`, *optional*):
|
71 |
+
Collection of tokens which will never be split during tokenization. Only has an effect when
|
72 |
+
`do_basic_tokenize=True`
|
73 |
+
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
|
74 |
+
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
|
75 |
+
token instead.
|
76 |
+
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
|
77 |
+
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
|
78 |
+
sequence classification or for a text and a question for question answering. It is also used as the last
|
79 |
+
token of a sequence built with special tokens.
|
80 |
+
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
|
81 |
+
The token used for padding, for example when batching sequences of different lengths.
|
82 |
+
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
|
83 |
+
The classifier token which is used when doing sequence classification (classification of the whole sequence
|
84 |
+
instead of per-token classification). It is the first token of the sequence when built with special tokens.
|
85 |
+
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
|
86 |
+
The token used for masking values. This is the token used when training this model with masked language
|
87 |
+
modeling. This is the token which the model will try to predict.
|
88 |
+
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
|
89 |
+
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this
|
90 |
+
[issue](https://github.com/huggingface/transformers/issues/328)).
|
91 |
+
strip_accents (`bool`, *optional*):
|
92 |
+
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
|
93 |
+
value for `lowercase` (as in the original BERT).
|
94 |
+
"""
|
95 |
+
|
96 |
+
vocab_files_names = VOCAB_FILES_NAMES
|
97 |
+
model_input_names = ["input_ids", "attention_mask"]
|
98 |
+
|
99 |
+
# Copied from transformers.models.bert.tokenization_bert.BertTokenizer.__init__
|
100 |
+
def __init__(
|
101 |
+
self,
|
102 |
+
vocab_file,
|
103 |
+
do_lower_case=True,
|
104 |
+
do_basic_tokenize=True,
|
105 |
+
never_split=None,
|
106 |
+
unk_token="[UNK]",
|
107 |
+
sep_token="[SEP]",
|
108 |
+
pad_token="[PAD]",
|
109 |
+
cls_token="[CLS]",
|
110 |
+
mask_token="[MASK]",
|
111 |
+
tokenize_chinese_chars=True,
|
112 |
+
strip_accents=None,
|
113 |
+
**kwargs,
|
114 |
+
):
|
115 |
+
if not os.path.isfile(vocab_file):
|
116 |
+
raise ValueError(
|
117 |
+
f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
|
118 |
+
" model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
|
119 |
+
)
|
120 |
+
self.vocab = load_vocab(vocab_file)
|
121 |
+
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(
                do_lower_case=do_lower_case,
                never_split=never_split,
                tokenize_chinese_chars=tokenize_chinese_chars,
                strip_accents=strip_accents,
            )

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))

        super().__init__(
            do_lower_case=do_lower_case,
            do_basic_tokenize=do_basic_tokenize,
            never_split=never_split,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

    @property
    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.do_lower_case
    def do_lower_case(self):
        return self.basic_tokenizer.do_lower_case

    @property
    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.vocab_size
    def vocab_size(self):
        return len(self.vocab)

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab
    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize
    def _tokenize(self, text, split_special_tokens=False):
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(
                text, never_split=self.all_special_tokens if not split_special_tokens else None
            ):
                # If the token is part of the never_split set
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A BERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
        pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)


# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
class BasicTokenizer(object):
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).

    Args:
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.

            This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
        do_split_on_punc (`bool`, *optional*, defaults to `True`):
            In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
            the full context of the words, such as contractions.
    """

    def __init__(
        self,
        do_lower_case=True,
        never_split=None,
        tokenize_chinese_chars=True,
        strip_accents=None,
        do_split_on_punc=True,
    ):
        if never_split is None:
            never_split = []
        self.do_lower_case = do_lower_case
        self.never_split = set(never_split)
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents
        self.do_split_on_punc = do_split_on_punc

    def tokenize(self, text, never_split=None):
        """
        Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.

        Args:
            never_split (`List[str]`, *optional*)
                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
                [`PreTrainedTokenizer.tokenize`]) List of token not to split.
        """
        # union() returns a new set by concatenating the two sets.
        never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)

        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        # prevents treating the same character with different unicode codepoints as different characters
        unicode_normalized_text = unicodedata.normalize("NFC", text)
        orig_tokens = whitespace_tokenize(unicode_normalized_text)
        split_tokens = []
        for token in orig_tokens:
            if token not in never_split:
                if self.do_lower_case:
                    token = token.lower()
                    if self.strip_accents is not False:
                        token = self._run_strip_accents(token)
                elif self.strip_accents:
                    token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, never_split))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text, never_split=None):
        """Splits punctuation on a piece of text."""
        if not self.do_split_on_punc or (never_split is not None and text in never_split):
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)


# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """
        Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
        tokenization using the given vocabulary.

        For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`.

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through *BasicTokenizer*.

        Returns:
            A list of wordpiece tokens.
        """

        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
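To make the greedy longest-match-first behaviour described in the `WordpieceTokenizer.tokenize` docstring above concrete, here is a minimal sketch. The toy vocabulary and the direct import of the deprecated module are illustrative assumptions, not part of the diff.

```python
# Minimal sketch of the greedy longest-match-first WordPiece split described above.
# The vocabulary below is a made-up toy example; "[UNK]" is the unknown token.
from transformers.models.deprecated.retribert.tokenization_retribert import WordpieceTokenizer

toy_vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
wordpiece = WordpieceTokenizer(vocab=toy_vocab, unk_token="[UNK]")

print(wordpiece.tokenize("unaffable"))    # ['un', '##aff', '##able']
print(wordpiece.tokenize("unbreakable"))  # ['[UNK]'] - no greedy match for the tail, so the whole word maps to unk_token
```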
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert_fast.py
ADDED
@@ -0,0 +1,180 @@
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for RetriBERT."""

import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" RetriBERT tokenizer (backed by HuggingFace's *tokenizers* library).

    [`RetriBertTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation
    splitting and wordpiece.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        clean_text (`bool`, *optional*, defaults to `True`):
            Whether or not to clean the text before tokenization by removing any control characters and replacing all
            whitespaces by the classic one.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
            issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
        wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
            The prefix for subwords.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.__init__
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.build_inputs_with_special_tokens
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A BERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.create_token_type_ids_from_sequences
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
        pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
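The two `# Copied from` methods above fully determine how single sequences and pairs are packed. A small, self-contained sketch of that layout (using arbitrary placeholder token IDs rather than a real RetriBERT vocabulary) may help:

```python
# Self-contained sketch of the [CLS] A [SEP] B [SEP] packing and the matching token type ids,
# mirroring build_inputs_with_special_tokens / create_token_type_ids_from_sequences above.
# The IDs are arbitrary placeholders, not taken from any real vocabulary file.
CLS_ID, SEP_ID = 101, 102

def pack_pair(ids_a, ids_b=None):
    input_ids = [CLS_ID] + ids_a + [SEP_ID]
    token_type_ids = [0] * len(input_ids)          # first segment is all 0s
    if ids_b is not None:
        input_ids += ids_b + [SEP_ID]
        token_type_ids += [1] * (len(ids_b) + 1)   # second segment (and its trailing SEP) is all 1s
    return input_ids, token_type_ids

print(pack_pair([7, 8], [9]))
# ([101, 7, 8, 102, 9, 102], [0, 0, 0, 0, 1, 1])
```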
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__init__.py
ADDED
@@ -0,0 +1,97 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
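The `_LazyModule` indirection above means none of the heavy submodules are imported until one of the registered names is actually accessed. A hedged sketch of how that looks from the caller's side (assuming this deprecated subpackage is importable in the installed transformers version):

```python
# Sketch of the lazy-import behaviour set up by the __init__ above.
# Attribute access is what triggers the real submodule import; the names used
# here are the ones registered in _import_structure.
from transformers.models.deprecated import transfo_xl

config_cls = transfo_xl.TransfoXLConfig        # imports configuration_transfo_xl on first access
tokenizer_cls = transfo_xl.TransfoXLTokenizer  # imports tokenization_transfo_xl on first access
print(config_cls.__name__, tokenizer_cls.__name__)
```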
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.63 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/configuration_transfo_xl.cpython-310.pyc
ADDED
Binary file (6.82 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/convert_transfo_xl_original_tf_checkpoint_to_pytorch.cpython-310.pyc
ADDED
Binary file (3.12 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl.cpython-310.pyc
ADDED
Binary file (34.4 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl_utilities.cpython-310.pyc
ADDED
Binary file (4.15 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl.cpython-310.pyc
ADDED
Binary file (40.2 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl_utilities.cpython-310.pyc
ADDED
Binary file (6.1 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/tokenization_transfo_xl.cpython-310.pyc
ADDED
Binary file (25.1 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py
ADDED
@@ -0,0 +1,252 @@
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for PyTorch Transformer XL model. Directly adapted from https://github.com/kimiyoung/transformer-xl.
"""


import torch
from torch import nn


# CUDA_MAJOR = int(torch.version.cuda.split('.')[0])
# CUDA_MINOR = int(torch.version.cuda.split('.')[1])


class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))

                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        """
        Params:
            hidden :: [len*bsz x d_proj]
            labels :: [len*bsz]

        Return:
            if labels is None: out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary else: out ::
            [(len-1)*bsz] Negative log likelihood. We could replace this implementation by the native PyTorch one if
            theirs had an option to set bias on all clusters in the native one. here:
            https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138
        """

        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        r"""
        Computes log probabilities for all \\(n\_classes\\) From:
        https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.p

        Args:
            hidden (Tensor): a minibatch of example

        Returns:
            log-probabilities of for each class \\(c\\) in range \\(0 <= c <= n\_classes\\), where \\(n\_classes\\) is
            a parameter passed to `AdaptiveLogSoftmaxWithLoss` constructor. Shape:

            - Input: \\((N, in\_features)\\)
            - Output: \\((N, n\_classes)\\)
        """
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx, stop_idx] = logprob_i

            return out
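`ProjectedAdaptiveLogSoftmax` uses the same shortlist-plus-tail-clusters idea as PyTorch's built-in adaptive softmax, which the `forward` docstring above points to. A small sketch with the native module (cutoffs and sizes chosen arbitrarily for illustration) shows the layout:

```python
# Illustration of the cutoff/cluster idea with torch.nn.AdaptiveLogSoftmaxWithLoss,
# the native module referenced in the forward() docstring above. Sizes are arbitrary.
import torch
from torch import nn

n_classes, in_features = 1000, 64
# Tokens 0-99 form the frequent "shortlist" head; 100-499 and 500-999 are tail clusters
# whose projections shrink progressively (div_value), mirroring div_val above.
adaptive = nn.AdaptiveLogSoftmaxWithLoss(in_features, n_classes, cutoffs=[100, 500], div_value=4.0)

hidden = torch.randn(8, in_features)           # [len*bsz, d_proj]-style input
targets = torch.randint(0, n_classes, (8,))
out = adaptive(hidden, targets)
print(out.output.shape, out.loss.item())       # per-example target log-probs and the mean NLL
```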
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py
ADDED
@@ -0,0 +1,819 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
|
3 |
+
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
4 |
+
#
|
5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
+
# you may not use this file except in compliance with the License.
|
7 |
+
# You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing, software
|
12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
+
# See the License for the specific language governing permissions and
|
15 |
+
# limitations under the License.
|
16 |
+
"""
|
17 |
+
Tokenization classes for Transformer XL model. Adapted from https://github.com/kimiyoung/transformer-xl.
|
18 |
+
"""
|
19 |
+
|
20 |
+
|
21 |
+
import glob
|
22 |
+
import os
|
23 |
+
import pickle
|
24 |
+
import re
|
25 |
+
from collections import Counter, OrderedDict
|
26 |
+
from typing import List, Optional, Tuple
|
27 |
+
|
28 |
+
import numpy as np
|
29 |
+
|
30 |
+
from ....tokenization_utils import PreTrainedTokenizer
|
31 |
+
from ....utils import (
|
32 |
+
cached_file,
|
33 |
+
is_sacremoses_available,
|
34 |
+
is_torch_available,
|
35 |
+
logging,
|
36 |
+
requires_backends,
|
37 |
+
strtobool,
|
38 |
+
torch_only_method,
|
39 |
+
)
|
40 |
+
|
41 |
+
|
42 |
+
if is_sacremoses_available():
|
43 |
+
import sacremoses as sm
|
44 |
+
|
45 |
+
|
46 |
+
if is_torch_available():
|
47 |
+
import torch
|
48 |
+
|
49 |
+
|
50 |
+
logger = logging.get_logger(__name__)
|
51 |
+
|
52 |
+
VOCAB_FILES_NAMES = {
|
53 |
+
"pretrained_vocab_file": "vocab.pkl",
|
54 |
+
"pretrained_vocab_file_torch": "vocab.bin",
|
55 |
+
"vocab_file": "vocab.txt",
|
56 |
+
}
|
57 |
+
|
58 |
+
|
59 |
+
PRETRAINED_CORPUS_ARCHIVE_MAP = {
|
60 |
+
"transfo-xl/transfo-xl-wt103": "https://huggingface.co/transfo-xl/transfo-xl-wt103/resolve/main/corpus.bin",
|
61 |
+
}
|
62 |
+
CORPUS_NAME = "corpus.bin"
|
63 |
+
|
64 |
+
MATCH_NUMBERS = r"(?<=\d)[,.](?=\d)", r" @\g<0>@ "
|
65 |
+
DETOKENIZE_NUMBERS = [(r" @\,@ ", r","), (r" @\.@ ", r".")]
|
66 |
+
|
67 |
+
|
68 |
+
def tokenize_numbers(text_array: List[str]) -> List[str]:
|
69 |
+
"""
|
70 |
+
Splits large comma-separated numbers and floating point values. This is done by replacing commas with ' @,@ ' and
|
71 |
+
dots with ' @.@ '.
|
72 |
+
|
73 |
+
Args:
|
74 |
+
text_array: An already tokenized text as list.
|
75 |
+
|
76 |
+
Returns:
|
77 |
+
A list of strings with tokenized numbers.
|
78 |
+
|
79 |
+
Example:
|
80 |
+
|
81 |
+
```python
|
82 |
+
>>> tokenize_numbers(["$", "5,000", "1.73", "m"])
|
83 |
+
['$', '5', '@,@', '000', '1', '@.@', '73', 'm']
|
84 |
+
```"""
|
85 |
+
tokenized = []
|
86 |
+
for i in range(len(text_array)):
|
87 |
+
reg, sub = MATCH_NUMBERS
|
88 |
+
replaced = re.sub(reg, sub, text_array[i]).split()
|
89 |
+
tokenized.extend(replaced)
|
90 |
+
|
91 |
+
return tokenized
|
92 |
+
|
93 |
+
|
94 |
+
def detokenize_numbers(text: str) -> str:
|
95 |
+
"""
|
96 |
+
Inverts the operation of *tokenize_numbers*. This is replacing ' @,@ ' and ' @.@' by ',' and '.'.
|
97 |
+
|
98 |
+
Args:
|
99 |
+
text: A string where the number should be detokenized.
|
100 |
+
|
101 |
+
Returns:
|
102 |
+
A detokenized string.
|
103 |
+
|
104 |
+
Example:
|
105 |
+
|
106 |
+
```python
|
107 |
+
>>> detokenize_numbers("$ 5 @,@ 000 1 @.@ 73 m")
|
108 |
+
'$ 5,000 1.73 m'
|
109 |
+
```"""
|
110 |
+
for reg, sub in DETOKENIZE_NUMBERS:
|
111 |
+
text = re.sub(reg, sub, text)
|
112 |
+
return text
|
113 |
+
|
114 |
+
|
115 |
+
class TransfoXLTokenizer(PreTrainedTokenizer):
|
116 |
+
"""
|
117 |
+
Construct a Transformer-XL tokenizer adapted from Vocab class in [the original
|
118 |
+
code](https://github.com/kimiyoung/transformer-xl). The Transformer-XL tokenizer is a word-level tokenizer (no
|
119 |
+
sub-word tokenization).
|
120 |
+
|
121 |
+
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
|
122 |
+
this superclass for more information regarding those methods.
|
123 |
+
|
124 |
+
Args:
|
125 |
+
special (`List[str]`, *optional*):
|
126 |
+
A list of special tokens (to be treated by the original implementation of this tokenizer).
|
127 |
+
min_freq (`int`, *optional*, defaults to 0):
|
128 |
+
The minimum number of times a token has to be present in order to be kept in the vocabulary (otherwise it
|
129 |
+
will be mapped to `unk_token`).
|
130 |
+
max_size (`int`, *optional*):
|
131 |
+
The maximum size of the vocabulary. If left unset, it will default to the size of the vocabulary found
|
132 |
+
after excluding the tokens according to the `min_freq` rule.
|
133 |
+
lower_case (`bool`, *optional*, defaults to `False`):
|
134 |
+
Whether or not to lowercase the input when tokenizing.
|
135 |
+
delimiter (`str`, *optional*):
|
136 |
+
The delimiter used between tokens.
|
137 |
+
vocab_file (`str`, *optional*):
|
138 |
+
File containing the vocabulary (from the original implementation).
|
139 |
+
pretrained_vocab_file (`str`, *optional*):
|
140 |
+
File containing the vocabulary as saved with the `save_pretrained()` method.
|
141 |
+
never_split (`List[str]`, *optional*):
|
142 |
+
List of tokens that should never be split. If no list is specified, will simply use the existing special
|
143 |
+
tokens.
|
144 |
+
unk_token (`str`, *optional*, defaults to `"<unk>"`):
|
145 |
+
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
|
146 |
+
token instead.
|
147 |
+
eos_token (`str`, *optional*, defaults to `"<eos>"`):
|
148 |
+
The end of sequence token.
|
149 |
+
additional_special_tokens (`List[str]`, *optional*, defaults to `['<formula>']`):
|
150 |
+
A list of additional special tokens (for the HuggingFace functionality).
|
151 |
+
language (`str`, *optional*, defaults to `"en"`):
|
152 |
+
The language of this tokenizer (used for mose preprocessing).
|
153 |
+
"""
|
154 |
+
|
155 |
+
vocab_files_names = VOCAB_FILES_NAMES
|
156 |
+
model_input_names = ["input_ids"]
|
157 |
+
|
158 |
+
def __init__(
|
159 |
+
self,
|
160 |
+
special=None,
|
161 |
+
min_freq=0,
|
162 |
+
max_size=None,
|
163 |
+
lower_case=False,
|
164 |
+
delimiter=None,
|
165 |
+
vocab_file=None,
|
166 |
+
pretrained_vocab_file: str = None,
|
167 |
+
never_split=None,
|
168 |
+
unk_token="<unk>",
|
169 |
+
eos_token="<eos>",
|
170 |
+
additional_special_tokens=["<formula>"],
|
171 |
+
language="en",
|
172 |
+
**kwargs,
|
173 |
+
):
|
174 |
+
logger.error(
|
175 |
+
"`TransfoXL` was deprecated due to security issues linked to `pickle.load` in `TransfoXLTokenizer`. "
|
176 |
+
"See more details on this model's documentation page: "
|
177 |
+
"`https://github.com/huggingface/transformers/blob/main/docs/source/en/model_doc/transfo-xl.md`."
|
178 |
+
)
|
179 |
+
|
180 |
+
requires_backends(self, "sacremoses")
|
181 |
+
if special is None:
|
182 |
+
special = []
|
183 |
+
self.counter = Counter()
|
184 |
+
self.special = special
|
185 |
+
self.min_freq = min_freq
|
186 |
+
self.max_size = max_size
|
187 |
+
self.lower_case = lower_case
|
188 |
+
self.delimiter = delimiter
|
189 |
+
self.vocab_file = vocab_file
|
190 |
+
self.punctuation_symbols = '!"#$%&()*+,-./\\:;<=>?@[\\]^_`{|}~'
|
191 |
+
self.punction_without_space_before_pattern = re.compile(rf"[^\s][{self.punctuation_symbols}]")
|
192 |
+
self.punctuation_with_space_around_pattern = self._compile_space_around_punctuation_pattern()
|
193 |
+
self.language = language
|
194 |
+
self.moses_punct_normalizer = sm.MosesPunctNormalizer(language)
|
195 |
+
self.moses_tokenizer = sm.MosesTokenizer(language)
|
196 |
+
self.moses_detokenizer = sm.MosesDetokenizer(language)
|
197 |
+
self.idx2sym = []
|
198 |
+
self.sym2idx = OrderedDict()
|
199 |
+
# This try... catch... is not beautiful but honestly this tokenizer was not made to be used
|
200 |
+
# in a library like ours, at all.
|
201 |
+
try:
|
202 |
+
vocab_dict = None
|
203 |
+
if pretrained_vocab_file is not None:
|
204 |
+
# Priority on pickle files (support PyTorch and TF)
|
205 |
+
if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
|
206 |
+
raise ValueError(
|
207 |
+
"This part uses `pickle.load` which is insecure and will execute arbitrary code that is "
|
208 |
+
"potentially malicious. It's recommended to never unpickle data that could have come from an "
|
209 |
+
"untrusted source, or that could have been tampered with. If you already verified the pickle "
|
210 |
+
"data and decided to use it, you can set the environment variable "
|
211 |
+
"`TRUST_REMOTE_CODE` to `True` to allow it."
|
212 |
+
)
|
213 |
+
with open(pretrained_vocab_file, "rb") as f:
|
214 |
+
vocab_dict = pickle.load(f)
|
215 |
+
|
216 |
+
# Loading a torch-saved transfo-xl vocab dict with pickle results in an integer
|
217 |
+
# Entering this if statement means that we tried to load a torch-saved file with pickle, and we failed.
|
218 |
+
# We therefore load it with torch, if it's available.
|
219 |
+
if isinstance(vocab_dict, int):
|
220 |
+
if not is_torch_available():
|
221 |
+
raise ImportError(
|
222 |
+
"Not trying to load dict with PyTorch as you need to install pytorch to load "
|
223 |
+
"from a PyTorch pretrained vocabulary, "
|
224 |
+
"or activate it with environment variables USE_TORCH=1 and USE_TF=0."
|
225 |
+
)
|
226 |
+
vocab_dict = torch.load(pretrained_vocab_file)
|
227 |
+
|
228 |
+
if vocab_dict is not None:
|
229 |
+
for key, value in vocab_dict.items():
|
230 |
+
if key not in self.__dict__ or key in ["sym2idx", "idx2sym"]:
|
231 |
+
self.__dict__[key] = value
|
232 |
+
elif vocab_file is not None:
|
233 |
+
self.build_vocab()
|
234 |
+
|
235 |
+
except Exception as e:
|
236 |
+
raise ValueError(
|
237 |
+
f"Unable to parse file {pretrained_vocab_file}. Unknown format. "
|
238 |
+
"If you tried to load a model saved through TransfoXLTokenizerFast, "
|
239 |
+
"please note they are not compatible."
|
240 |
+
) from e
|
241 |
+
|
242 |
+
if vocab_file is not None:
|
243 |
+
self.build_vocab()
|
244 |
+
|
245 |
+
super().__init__(
|
246 |
+
special=special,
|
247 |
+
min_freq=min_freq,
|
248 |
+
max_size=max_size,
|
249 |
+
lower_case=lower_case,
|
250 |
+
delimiter=delimiter,
|
251 |
+
vocab_file=vocab_file,
|
252 |
+
pretrained_vocab_file=pretrained_vocab_file,
|
253 |
+
never_split=never_split,
|
254 |
+
unk_token=unk_token,
|
255 |
+
eos_token=eos_token,
|
256 |
+
additional_special_tokens=additional_special_tokens,
|
257 |
+
language=language,
|
258 |
+
**kwargs,
|
259 |
+
)
|
260 |
+
|
261 |
+
# these are not required to initialize the parent class as only used when tokenizing.
|
262 |
+
if never_split is None:
|
263 |
+
never_split = self.all_special_tokens
|
264 |
+
self.never_split = never_split
|
265 |
+
|
266 |
+
@property
|
267 |
+
def do_lower_case(self):
|
268 |
+
return self.lower_case
|
269 |
+
|
270 |
+
def _compile_space_around_punctuation_pattern(self):
|
271 |
+
look_ahead_for_special_token = f"(?=[{self.punctuation_symbols}])"
|
272 |
+
look_ahead_to_match_all_except_space = r"(?=[^\s])"
|
273 |
+
return re.compile(r"" + look_ahead_for_special_token + look_ahead_to_match_all_except_space)
|
274 |
+
|
275 |
+
def count_file(self, path, verbose=False, add_eos=False):
|
276 |
+
if verbose:
|
277 |
+
logger.info(f"counting file {path} ...")
|
278 |
+
assert os.path.exists(path), f"Input file {path} not found"
|
279 |
+
|
280 |
+
sents = []
|
281 |
+
with open(path, "r", encoding="utf-8") as f:
|
282 |
+
for idx, line in enumerate(f):
|
283 |
+
if verbose and idx > 0 and idx % 500000 == 0:
|
284 |
+
logger.info(f" line {idx}")
|
285 |
+
symbols = self.tokenize(line, add_eos=add_eos)
|
286 |
+
self.counter.update(symbols)
|
287 |
+
sents.append(symbols)
|
288 |
+
|
289 |
+
return sents
|
290 |
+
|
291 |
+
def count_sents(self, sents, verbose=False):
|
292 |
+
"""
|
293 |
+
sents : a list of sentences, each a list of tokenized symbols
|
294 |
+
"""
|
295 |
+
if verbose:
|
296 |
+
logger.info(f"counting {len(sents)} sents ...")
|
297 |
+
for idx, symbols in enumerate(sents):
|
298 |
+
if verbose and idx > 0 and idx % 500000 == 0:
|
299 |
+
logger.info(f" line {idx}")
|
300 |
+
self.counter.update(symbols)
|
301 |
+
|
302 |
+
def _build_from_file(self, vocab_file):
|
303 |
+
self.idx2sym = []
|
304 |
+
self.sym2idx = OrderedDict()
|
305 |
+
|
306 |
+
with open(vocab_file, "r", encoding="utf-8") as f:
|
307 |
+
for line in f:
|
308 |
+
symb = line.strip().split()[0]
|
309 |
+
self.add_symbol(symb)
|
310 |
+
if "<UNK>" in self.sym2idx:
|
311 |
+
self.unk_idx = self.sym2idx["<UNK>"]
|
312 |
+
elif "<unk>" in self.sym2idx:
|
313 |
+
self.unk_idx = self.sym2idx["<unk>"]
|
314 |
+
else:
|
315 |
+
raise ValueError("Token not in vocabulary and no <unk> token in vocabulary for replacement.")
|
316 |
+
|
317 |
+
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
|
318 |
+
if os.path.isdir(save_directory):
|
319 |
+
vocab_file = os.path.join(
|
320 |
+
save_directory,
|
321 |
+
(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["pretrained_vocab_file"],
|
322 |
+
)
|
323 |
+
else:
|
324 |
+
vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
|
325 |
+
with open(vocab_file, "wb") as f:
|
326 |
+
pickle.dump(self.__dict__, f)
|
327 |
+
return (vocab_file,)
|
328 |
+
|
329 |
+
def build_vocab(self):
|
330 |
+
if self.vocab_file:
|
331 |
+
logger.info(f"building vocab from {self.vocab_file}")
|
332 |
+
self._build_from_file(self.vocab_file)
|
333 |
+
logger.info(f"Final vocab size {len(self.sym2idx)}")
|
334 |
+
else:
|
335 |
+
logger.info(f"building vocab with min_freq={self.min_freq}, max_size={self.max_size}")
|
336 |
+
self.idx2sym = []
|
337 |
+
self.sym2idx = OrderedDict()
|
338 |
+
|
339 |
+
for sym in self.special:
|
340 |
+
self.add_special(sym)
|
341 |
+
|
342 |
+
for sym, cnt in self.counter.most_common(self.max_size):
|
343 |
+
if cnt < self.min_freq:
|
344 |
+
break
|
345 |
+
self.add_symbol(sym)
|
346 |
+
|
347 |
+
logger.info(f"Final vocab size {len(self.sym2idx)} from {len(self.counter)} unique tokens")
|
348 |
+
|
349 |
+
@torch_only_method
|
350 |
+
def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False):
|
351 |
+
if verbose:
|
352 |
+
logger.info(f"encoding file {path} ...")
|
353 |
+
assert os.path.exists(path), f"Output file {path} not found"
|
354 |
+
encoded = []
|
355 |
+
with open(path, "r", encoding="utf-8") as f:
|
356 |
+
for idx, line in enumerate(f):
|
357 |
+
if verbose and idx > 0 and idx % 500000 == 0:
|
358 |
+
logger.info(f" line {idx}")
|
359 |
+
symbols = self.tokenize(line, add_eos=add_eos, add_double_eos=add_double_eos)
|
360 |
+
encoded.append(self.convert_to_tensor(symbols))
|
361 |
+
|
362 |
+
if ordered:
|
363 |
+
encoded = torch.cat(encoded)
|
364 |
+
|
365 |
+
return encoded
|
366 |
+
|
367 |
+
@torch_only_method
|
368 |
+
def encode_sents(self, sents, ordered=False, verbose=False):
|
369 |
+
if verbose:
|
370 |
+
logger.info(f"encoding {len(sents)} sents ...")
|
371 |
+
encoded = []
|
372 |
+
for idx, symbols in enumerate(sents):
|
373 |
+
if verbose and idx > 0 and idx % 500000 == 0:
|
374 |
+
logger.info(f" line {idx}")
|
375 |
+
encoded.append(self.convert_to_tensor(symbols))
|
376 |
+
|
377 |
+
if ordered:
|
378 |
+
encoded = torch.cat(encoded)
|
379 |
+
|
380 |
+
return encoded
|
381 |
+
|
382 |
+
def add_special(self, sym):
|
383 |
+
if sym not in self.sym2idx:
|
384 |
+
self.idx2sym.append(sym)
|
385 |
+
self.sym2idx[sym] = len(self.idx2sym) - 1
|
386 |
+
setattr(self, f"{sym.strip('<>')}_idx", self.sym2idx[sym])
|
387 |
+
|
388 |
+
def add_symbol(self, sym):
|
389 |
+
if sym not in self.sym2idx:
|
390 |
+
self.idx2sym.append(sym)
|
391 |
+
self.sym2idx[sym] = len(self.idx2sym) - 1
|
392 |
+
|
393 |
+
def move_added_token(self, token: str, target_idx: int):
|
394 |
+
"""
|
395 |
+
Moves an added token to a specific position in the vocab. This method should be used when resizing an embedding
|
396 |
+
layer other than the last one in the `AdaptiveEmbedding` in order to move the token in the tokenizer from the
|
397 |
+
default position (at the very end) to the desired one.
|
398 |
+
|
399 |
+
Args:
|
400 |
+
token: The token to move to a specific position in the vocab.
|
401 |
+
target_idx: The position where the token should be moved to.
|
402 |
+
"""
|
403 |
+
assert token in self.added_tokens_encoder, "Token which should be moved has to be an added token"
|
404 |
+
assert token not in self.idx2sym, "Token which should be moved is already in vocab"
|
405 |
+
|
406 |
+
# Insert sym into vocab
|
407 |
+
self.idx2sym.insert(target_idx, token)
|
408 |
+
self.sym2idx[token] = target_idx
|
409 |
+
|
410 |
+
# Shift following indices in sym2idx
|
411 |
+
for idx in range(target_idx + 1, len(self.idx2sym)):
|
412 |
+
current_sym = self.idx2sym[idx]
|
413 |
+
self.sym2idx[current_sym] = idx
|
414 |
+
|
415 |
+
# Delete token from added_tokens
|
416 |
+
old_index = self._added_tokens_encoder.pop(token)
|
417 |
+
self._added_tokens_decoder.pop(old_index)
|
418 |
+
|
419 |
+
def moses_punct_norm(self, text):
|
420 |
+
return self.moses_punct_normalizer.normalize(text)
|
421 |
+
|
422 |
+
def moses_tokenize(self, text):
|
423 |
+
return self.moses_tokenizer.tokenize(
|
424 |
+
text, aggressive_dash_splits=True, return_str=False, escape=False, protected_patterns=self.never_split
|
425 |
+
)
|
426 |
+
|
427 |
+
def moses_pipeline(self, text: str) -> List[str]:
|
428 |
+
"""
|
429 |
+
Does basic tokenization using [`sacremoses.MosesPunctNormalizer`] and [`sacremoses.MosesTokenizer`] with
|
430 |
+
*aggressive_dash_splits=True* (see [`sacremoses.tokenize.MosesTokenizer.tokenize`]). Additionally, large
|
431 |
+
comma-separated numbers and floating point values are split. E.g. "23,000 people are 1.80m tall" -> "23 @,@ 000
|
432 |
+
people are 1 @.@ 80m tall"
|
433 |
+
|
434 |
+
Args:
|
435 |
+
text: Text to be tokenized
|
436 |
+
|
437 |
+
Returns:
|
438 |
+
A list of tokenized strings
|
439 |
+
|
440 |
+
Example:
|
441 |
+
|
442 |
+
```python
|
443 |
+
>>> tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl/transfo-xl-wt103")
|
444 |
+
>>> tokenizer.moses_pipeline("23,000 people are 1.80 m tall")
|
445 |
+
['23', '@,@', '000', 'people', 'are', '1', '@.@', '80', 'm', 'tall']
|
446 |
+
```"""
|
447 |
+
text = self.moses_punct_norm(text)
|
448 |
+
text = self.moses_tokenize(text)
|
449 |
+
text = tokenize_numbers(text)
|
450 |
+
return text
|
451 |
+
|
452 |
+
def _convert_id_to_token(self, idx):
|
453 |
+
"""Converts an id in a token (BPE) using the vocab."""
|
454 |
+
assert 0 <= idx < len(self), f"Index {idx} out of vocabulary range"
|
455 |
+
return self.idx2sym[idx]
|
456 |
+
|
457 |
+
def _convert_token_to_id(self, sym):
|
458 |
+
"""Converts a token (str) in an id using the vocab."""
|
459 |
+
if sym in self.sym2idx:
|
460 |
+
return self.sym2idx[sym]
|
461 |
+
else:
|
462 |
+
# logger.info(f'encounter unk {sym}')
|
463 |
+
# assert '<eos>' not in sym
|
464 |
+
if hasattr(self, "unk_idx"):
|
465 |
+
return self.sym2idx.get(sym, self.unk_idx)
|
466 |
+
# Backward compatibility with pre-trained models
|
467 |
+
elif "<unk>" in self.sym2idx:
|
468 |
+
return self.sym2idx["<unk>"]
|
469 |
+
elif "<UNK>" in self.sym2idx:
|
470 |
+
return self.sym2idx["<UNK>"]
|
471 |
+
else:
|
472 |
+
raise ValueError("Token not in vocabulary and no <unk> token in vocabulary for replacement.")
|
473 |
+
|
474 |
+
def convert_tokens_to_string(self, tokens):
|
475 |
+
"""
|
476 |
+
Converts a sequence of tokens (strings) into a single string. Additionally, the split numbers are converted back
|
477 |
+
into their original form.
|
478 |
+
"""
|
479 |
+
out_string = self.moses_detokenizer.detokenize(tokens)
|
480 |
+
return detokenize_numbers(out_string).strip()
|
481 |
+
|
482 |
+
@torch_only_method
|
483 |
+
def convert_to_tensor(self, symbols):
|
484 |
+
return torch.LongTensor(self.convert_tokens_to_ids(symbols))
|
485 |
+
|
486 |
+
@property
|
487 |
+
def vocab_size(self):
|
488 |
+
return len(self.idx2sym)
|
489 |
+
|
490 |
+
def get_vocab(self):
|
491 |
+
vocab = self.sym2idx.copy()
|
492 |
+
vocab.update(self.added_tokens_encoder)
|
493 |
+
return vocab
|
494 |
+
|
495 |
+
def _tokenize(self, line, add_eos=False, add_double_eos=False):
|
496 |
+
line = line.strip()
|
497 |
+
# convert to lower case
|
498 |
+
if self.lower_case:
|
499 |
+
line = line.lower()
|
500 |
+
|
501 |
+
# empty delimiter '' will evaluate False
|
502 |
+
if self.delimiter == "":
|
503 |
+
symbols = line
|
504 |
+
else:
|
505 |
+
symbols = self.moses_pipeline(line)
|
506 |
+
|
507 |
+
if add_double_eos: # lm1b
|
508 |
+
return ["<S>"] + symbols + ["<S>"]
|
509 |
+
elif add_eos:
|
510 |
+
return symbols + ["<eos>"]
|
511 |
+
else:
|
512 |
+
return symbols
|
513 |
+
|
514 |
+
|
515 |
+
class LMOrderedIterator(object):
|
516 |
+
def __init__(self, data, bsz, bptt, device="cpu", ext_len=None):
|
517 |
+
"""
|
518 |
+
data -- LongTensor -- the LongTensor is strictly ordered
|
519 |
+
"""
|
520 |
+
self.bsz = bsz
|
521 |
+
self.bptt = bptt
|
522 |
+
self.ext_len = ext_len if ext_len is not None else 0
|
523 |
+
|
524 |
+
self.device = device
|
525 |
+
|
526 |
+
# Work out how cleanly we can divide the dataset into bsz parts.
|
527 |
+
self.n_step = data.size(0) // bsz
|
528 |
+
|
529 |
+
# Trim off any extra elements that wouldn't cleanly fit (remainders).
|
530 |
+
data = data.narrow(0, 0, self.n_step * bsz)
|
531 |
+
|
532 |
+
# Evenly divide the data across the bsz batches.
|
533 |
+
self.data = data.view(bsz, -1).t().contiguous().to(device)
|
534 |
+
|
535 |
+
# Number of mini-batches
|
536 |
+
self.n_batch = (self.n_step + self.bptt - 1) // self.bptt
|
537 |
+
|
538 |
+
def get_batch(self, i, bptt=None):
|
539 |
+
if bptt is None:
|
540 |
+
bptt = self.bptt
|
541 |
+
seq_len = min(bptt, self.data.size(0) - 1 - i)
|
542 |
+
|
543 |
+
end_idx = i + seq_len
|
544 |
+
beg_idx = max(0, i - self.ext_len)
|
545 |
+
|
546 |
+
data = self.data[beg_idx:end_idx]
|
547 |
+
target = self.data[i + 1 : i + 1 + seq_len]
|
548 |
+
|
549 |
+
data_out = data.transpose(0, 1).contiguous().to(self.device)
|
550 |
+
target_out = target.transpose(0, 1).contiguous().to(self.device)
|
551 |
+
|
552 |
+
return data_out, target_out, seq_len
|
553 |
+
|
554 |
+
def get_fixlen_iter(self, start=0):
|
555 |
+
for i in range(start, self.data.size(0) - 1, self.bptt):
|
556 |
+
yield self.get_batch(i)
|
557 |
+
|
558 |
+
def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
|
559 |
+
max_len = self.bptt + max_deviation * std
|
560 |
+
i = start
|
561 |
+
while True:
|
562 |
+
bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.0
|
563 |
+
bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std))))
|
564 |
+
data, target, seq_len = self.get_batch(i, bptt)
|
565 |
+
i += seq_len
|
566 |
+
yield data, target, seq_len
|
567 |
+
if i >= self.data.size(0) - 2:
|
568 |
+
break
|
569 |
+
|
570 |
+
def __iter__(self):
|
571 |
+
return self.get_fixlen_iter()
|
572 |
+
|
573 |
+
|
574 |
+
class LMShuffledIterator(object):
|
575 |
+
def __init__(self, data, bsz, bptt, device="cpu", ext_len=None, shuffle=False):
|
576 |
+
"""
|
577 |
+
data -- list[LongTensor] -- there is no order among the LongTensors
|
578 |
+
"""
|
579 |
+
self.data = data
|
580 |
+
|
581 |
+
self.bsz = bsz
|
582 |
+
self.bptt = bptt
|
583 |
+
self.ext_len = ext_len if ext_len is not None else 0
|
584 |
+
|
585 |
+
self.device = device
|
586 |
+
self.shuffle = shuffle
|
587 |
+
|
588 |
+
def get_sent_stream(self):
|
589 |
+
# index iterator
|
590 |
+
epoch_indices = np.random.permutation(len(self.data)) if self.shuffle else np.array(range(len(self.data)))
|
591 |
+
|
592 |
+
# sentence iterator
|
593 |
+
for idx in epoch_indices:
|
594 |
+
yield self.data[idx]
|
595 |
+
|
596 |
+
@torch_only_method
|
597 |
+
def stream_iterator(self, sent_stream):
|
598 |
+
# streams for each data in the batch
|
599 |
+
streams = [None] * self.bsz
|
600 |
+
|
601 |
+
data = torch.LongTensor(self.bptt, self.bsz)
|
602 |
+
target = torch.LongTensor(self.bptt, self.bsz)
|
603 |
+
|
604 |
+
n_retain = 0
|
605 |
+
|
606 |
+
while True:
|
607 |
+
# data : [n_retain+bptt x bsz]
|
608 |
+
# target : [bptt x bsz]
|
609 |
+
data[n_retain:].fill_(-1)
|
610 |
+
target.fill_(-1)
|
611 |
+
|
612 |
+
valid_batch = True
|
613 |
+
|
614 |
+
for i in range(self.bsz):
|
615 |
+
n_filled = 0
|
616 |
+
try:
|
617 |
+
while n_filled < self.bptt:
|
618 |
+
if streams[i] is None or len(streams[i]) <= 1:
|
619 |
+
streams[i] = next(sent_stream)
|
620 |
+
# number of new tokens to fill in
|
621 |
+
n_new = min(len(streams[i]) - 1, self.bptt - n_filled)
|
622 |
+
# first n_retain tokens are retained from last batch
|
623 |
+
data[n_retain + n_filled : n_retain + n_filled + n_new, i] = streams[i][:n_new]
|
624 |
+
target[n_filled : n_filled + n_new, i] = streams[i][1 : n_new + 1]
|
625 |
+
streams[i] = streams[i][n_new:]
|
626 |
+
n_filled += n_new
|
627 |
+
except StopIteration:
|
628 |
+
valid_batch = False
|
629 |
+
break
|
630 |
+
|
631 |
+
if not valid_batch:
|
632 |
+
return
|
633 |
+
|
634 |
+
data_out = data.transpose(0, 1).contiguous().to(self.device)
|
635 |
+
target_out = target.transpose(0, 1).contiguous().to(self.device)
|
636 |
+
|
637 |
+
yield data_out, target_out, self.bptt
|
638 |
+
|
639 |
+
n_retain = min(data.size(0), self.ext_len)
|
640 |
+
if n_retain > 0:
|
641 |
+
data[:n_retain] = data[-n_retain:]
|
642 |
+
data.resize_(n_retain + self.bptt, data.size(1))
|
643 |
+
|
644 |
+
def __iter__(self):
|
645 |
+
# sent_stream is an iterator
|
646 |
+
sent_stream = self.get_sent_stream()
|
647 |
+
|
648 |
+
for batch in self.stream_iterator(sent_stream):
|
649 |
+
yield batch
|
650 |
+
|
651 |
+
|
652 |
+
class LMMultiFileIterator(LMShuffledIterator):
|
653 |
+
def __init__(self, paths, vocab, bsz, bptt, device="cpu", ext_len=None, shuffle=False):
|
654 |
+
self.paths = paths
|
655 |
+
self.vocab = vocab
|
656 |
+
|
657 |
+
self.bsz = bsz
|
658 |
+
self.bptt = bptt
|
659 |
+
self.ext_len = ext_len if ext_len is not None else 0
|
660 |
+
|
661 |
+
self.device = device
|
662 |
+
self.shuffle = shuffle
|
663 |
+
|
664 |
+
def get_sent_stream(self, path):
|
665 |
+
sents = self.vocab.encode_file(path, add_double_eos=True)
|
666 |
+
if self.shuffle:
|
667 |
+
np.random.shuffle(sents)
|
668 |
+
sent_stream = iter(sents)
|
669 |
+
|
670 |
+
return sent_stream
|
671 |
+
|
672 |
+
def __iter__(self):
|
673 |
+
if self.shuffle:
|
674 |
+
np.random.shuffle(self.paths)
|
675 |
+
|
676 |
+
for path in self.paths:
|
677 |
+
# sent_stream is an iterator
|
678 |
+
sent_stream = self.get_sent_stream(path)
|
679 |
+
for batch in self.stream_iterator(sent_stream):
|
680 |
+
yield batch
|
681 |
+
|
682 |
+
|
683 |
+
class TransfoXLCorpus(object):
|
684 |
+
@classmethod
|
685 |
+
@torch_only_method
|
686 |
+
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
|
687 |
+
"""
|
688 |
+
Instantiate a pre-processed corpus.
|
689 |
+
"""
|
690 |
+
vocab = TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
|
691 |
+
is_local = os.path.isdir(pretrained_model_name_or_path)
|
692 |
+
# redirect to the cache, if necessary
|
693 |
+
try:
|
694 |
+
resolved_corpus_file = cached_file(pretrained_model_name_or_path, CORPUS_NAME, cache_dir=cache_dir)
|
695 |
+
except EnvironmentError:
|
696 |
+
logger.error(
|
697 |
+
f"Corpus '{pretrained_model_name_or_path}' was not found in corpus list"
|
698 |
+
f" ({', '.join(PRETRAINED_CORPUS_ARCHIVE_MAP.keys())}. We assumed '{pretrained_model_name_or_path}'"
|
699 |
+
f" was a path or url but couldn't find files {CORPUS_NAME} at this path or url."
|
700 |
+
)
|
701 |
+
return None
|
702 |
+
if is_local:
|
703 |
+
logger.info(f"loading corpus file {resolved_corpus_file}")
|
704 |
+
else:
|
705 |
+
logger.info(f"loading corpus file {CORPUS_NAME} from cache at {resolved_corpus_file}")
|
706 |
+
|
707 |
+
# Instantiate the corpus and load its pre-processed state.
|
708 |
+
corpus = cls(*inputs, **kwargs)
|
709 |
+
corpus_dict = torch.load(resolved_corpus_file)
|
710 |
+
for key, value in corpus_dict.items():
|
711 |
+
corpus.__dict__[key] = value
|
712 |
+
corpus.vocab = vocab
|
713 |
+
if corpus.train is not None:
|
714 |
+
corpus.train = torch.tensor(corpus.train, dtype=torch.long)
|
715 |
+
if corpus.valid is not None:
|
716 |
+
corpus.valid = torch.tensor(corpus.valid, dtype=torch.long)
|
717 |
+
if corpus.test is not None:
|
718 |
+
corpus.test = torch.tensor(corpus.test, dtype=torch.long)
|
719 |
+
return corpus
|
720 |
+
|
721 |
+
def __init__(self, *args, **kwargs):
|
722 |
+
self.vocab = TransfoXLTokenizer(*args, **kwargs)
|
723 |
+
self.dataset = None
|
724 |
+
self.train = None
|
725 |
+
self.valid = None
|
726 |
+
self.test = None
|
727 |
+
|
728 |
+
def build_corpus(self, path, dataset):
|
729 |
+
self.dataset = dataset
|
730 |
+
|
731 |
+
if self.dataset in ["ptb", "wt2", "enwik8", "text8"]:
|
732 |
+
self.vocab.count_file(os.path.join(path, "train.txt"))
|
733 |
+
self.vocab.count_file(os.path.join(path, "valid.txt"))
|
734 |
+
self.vocab.count_file(os.path.join(path, "test.txt"))
|
735 |
+
elif self.dataset == "wt103":
|
736 |
+
self.vocab.count_file(os.path.join(path, "train.txt"))
|
737 |
+
elif self.dataset == "lm1b":
|
738 |
+
train_path_pattern = os.path.join(
|
739 |
+
path,
|
740 |
+
"1-billion-word-language-modeling-benchmark-r13output",
|
741 |
+
"training-monolingual.tokenized.shuffled",
|
742 |
+
"news.en-*",
|
743 |
+
)
|
744 |
+
train_paths = glob.glob(train_path_pattern)
|
745 |
+
# the vocab will load from file when build_vocab() is called
|
746 |
+
|
747 |
+
self.vocab.build_vocab()
|
748 |
+
|
749 |
+
if self.dataset in ["ptb", "wt2", "wt103"]:
|
750 |
+
self.train = self.vocab.encode_file(os.path.join(path, "train.txt"), ordered=True)
|
751 |
+
self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=True)
|
752 |
+
self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=True)
|
753 |
+
elif self.dataset in ["enwik8", "text8"]:
|
754 |
+
self.train = self.vocab.encode_file(os.path.join(path, "train.txt"), ordered=True, add_eos=False)
|
755 |
+
self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=True, add_eos=False)
|
756 |
+
self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=True, add_eos=False)
|
757 |
+
elif self.dataset == "lm1b":
|
758 |
+
self.train = train_paths
|
759 |
+
self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=False, add_double_eos=True)
|
760 |
+
self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=False, add_double_eos=True)
|
761 |
+
|
762 |
+
def get_iterator(self, split, *args, **kwargs):
|
763 |
+
if split == "train":
|
764 |
+
if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
|
765 |
+
data_iter = LMOrderedIterator(self.train, *args, **kwargs)
|
766 |
+
elif self.dataset == "lm1b":
|
767 |
+
kwargs["shuffle"] = True
|
768 |
+
data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
|
769 |
+
elif split in ["valid", "test"]:
|
770 |
+
data = self.valid if split == "valid" else self.test
|
771 |
+
if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
|
772 |
+
data_iter = LMOrderedIterator(data, *args, **kwargs)
|
773 |
+
elif self.dataset == "lm1b":
|
774 |
+
data_iter = LMShuffledIterator(data, *args, **kwargs)
|
775 |
+
else:
|
776 |
+
data_iter = None
|
777 |
+
raise ValueError(f"Split not recognized: {split}")
|
778 |
+
|
779 |
+
return data_iter
|
780 |
+
|
781 |
+
|
782 |
+
@torch_only_method
|
783 |
+
def get_lm_corpus(datadir, dataset):
|
784 |
+
fn = os.path.join(datadir, "cache.pt")
|
785 |
+
fn_pickle = os.path.join(datadir, "cache.pkl")
|
786 |
+
if os.path.exists(fn):
|
787 |
+
logger.info("Loading cached dataset...")
|
788 |
+
corpus = torch.load(fn)
|
789 |
+
elif os.path.exists(fn_pickle):
|
790 |
+
logger.info("Loading cached dataset from pickle...")
|
791 |
+
if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
|
792 |
+
raise ValueError(
|
793 |
+
"This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially "
|
794 |
+
"malicious. It's recommended to never unpickle data that could have come from an untrusted source, or "
|
795 |
+
"that could have been tampered with. If you already verified the pickle data and decided to use it, "
|
796 |
+
"you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it."
|
797 |
+
)
|
798 |
+
with open(fn, "rb") as fp:
|
799 |
+
corpus = pickle.load(fp)
|
800 |
+
else:
|
801 |
+
logger.info(f"Producing dataset {dataset}...")
|
802 |
+
kwargs = {}
|
803 |
+
if dataset in ["wt103", "wt2"]:
|
804 |
+
kwargs["special"] = ["<eos>"]
|
805 |
+
kwargs["lower_case"] = False
|
806 |
+
elif dataset == "ptb":
|
807 |
+
kwargs["special"] = ["<eos>"]
|
808 |
+
kwargs["lower_case"] = True
|
809 |
+
elif dataset == "lm1b":
|
810 |
+
kwargs["special"] = []
|
811 |
+
kwargs["lower_case"] = False
|
812 |
+
kwargs["vocab_file"] = os.path.join(datadir, "1b_word_vocab.txt")
|
813 |
+
elif dataset in ["enwik8", "text8"]:
|
814 |
+
pass
|
815 |
+
|
816 |
+
corpus = TransfoXLCorpus(datadir, dataset, **kwargs)
|
817 |
+
torch.save(corpus, fn)
|
818 |
+
|
819 |
+
return corpus
|
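A minimal, self-contained sketch of the batching scheme implemented by `LMOrderedIterator` above: the ordered token stream is trimmed to a multiple of `bsz`, reshaped into `bsz` parallel columns, and each window of `bptt` steps is paired with the same window shifted by one token as the target. Variable names below are illustrative only, not part of the module.

```python
import torch

stream = torch.arange(23)  # stand-in for an ordered stream of token ids
bsz, bptt = 4, 3           # batch size and backprop-through-time window

n_step = stream.size(0) // bsz  # tokens per column
data = stream.narrow(0, 0, n_step * bsz).view(bsz, -1).t().contiguous()  # [n_step, bsz]

for i in range(0, data.size(0) - 1, bptt):
    seq_len = min(bptt, data.size(0) - 1 - i)
    inp = data[i : i + seq_len].t()          # [bsz, seq_len], as get_batch transposes
    tgt = data[i + 1 : i + 1 + seq_len].t()  # next-token targets, shifted by one
```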
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/van/__init__.py
ADDED
@@ -0,0 +1,54 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
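A short sketch of how the lazy import structure above behaves: importing a symbol only materializes the submodule that defines it, so the torch-dependent modeling file is not loaded until one of its classes is requested. The import path below assumes the deprecated package layout shown in this diff.

```python
# Assumes the deprecated layout transformers/models/deprecated/van/ from this upload.
from transformers.models.deprecated.van import VanConfig  # resolves configuration_van lazily

config = VanConfig()      # defaults mirror the van-base architecture
print(config.model_type)  # "van"
```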
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (909 Bytes).
|
|
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/configuration_van.cpython-310.pyc
ADDED
Binary file (4.22 kB).
|
|
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/convert_van_to_pytorch.cpython-310.pyc
ADDED
Binary file (8.49 kB).
|
|
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/modeling_van.cpython-310.pyc
ADDED
Binary file (18.1 kB).
|
|
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/van/configuration_van.py
ADDED
@@ -0,0 +1,110 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" VAN model configuration"""

from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)


from .._archive_maps import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class VanConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`VanModel`]. It is used to instantiate a VAN model
    according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the VAN
    [Visual-Attention-Network/van-base](https://huggingface.co/Visual-Attention-Network/van-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        patch_sizes (`List[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
            Patch size to use in each stage's embedding layer.
        strides (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
            Stride size to use in each stage's embedding layer to downsample the input.
        hidden_sizes (`List[int]`, *optional*, defaults to `[64, 128, 320, 512]`):
            Dimensionality (hidden size) at each stage.
        depths (`List[int]`, *optional*, defaults to `[3, 3, 12, 3]`):
            Depth (number of layers) for each stage.
        mlp_ratios (`List[int]`, *optional*, defaults to `[8, 8, 4, 4]`):
            The expansion ratio for mlp layer at each stage.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in each layer. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"gelu_new"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        layer_scale_init_value (`float`, *optional*, defaults to 0.01):
            The initial value for layer scaling.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            The dropout probability for stochastic depth.
        dropout_rate (`float`, *optional*, defaults to 0.0):
            The dropout probability for dropout.

    Example:
    ```python
    >>> from transformers import VanModel, VanConfig

    >>> # Initializing a VAN van-base style configuration
    >>> configuration = VanConfig()
    >>> # Initializing a model from the van-base style configuration
    >>> model = VanModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
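A small illustrative sketch (not part of the file above) of how the per-stage lists in `VanConfig` fit together: each stage's stride downsamples the feature map while `hidden_sizes` and `depths` set its width and number of layers. With the defaults, a 224x224 input passes through stages at 56, 28, 14 and 7 pixels per side.

```python
# Config defaults copied from VanConfig; the loop is only for illustration.
resolution = 224
strides = [4, 2, 2, 2]
hidden_sizes = [64, 128, 320, 512]
depths = [3, 3, 12, 3]

for stride, width, depth in zip(strides, hidden_sizes, depths):
    resolution //= stride
    print(f"stage: {resolution}x{resolution}, channels={width}, layers={depth}")
```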
llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/van/convert_van_to_pytorch.py
ADDED
@@ -0,0 +1,291 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2022 BNRist (Tsinghua University), TKLNDST (Nankai University) and The HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
"""Convert VAN checkpoints from the original repository.
|
16 |
+
|
17 |
+
URL: https://github.com/Visual-Attention-Network/VAN-Classification"""
|
18 |
+
|
19 |
+
|
20 |
+
import argparse
|
21 |
+
import json
|
22 |
+
import sys
|
23 |
+
from dataclasses import dataclass, field
|
24 |
+
from functools import partial
|
25 |
+
from pathlib import Path
|
26 |
+
from typing import List
|
27 |
+
|
28 |
+
import torch
|
29 |
+
import torch.nn as nn
|
30 |
+
from huggingface_hub import cached_download, hf_hub_download
|
31 |
+
from torch import Tensor
|
32 |
+
|
33 |
+
from transformers import AutoImageProcessor, VanConfig, VanForImageClassification
|
34 |
+
from transformers.models.deprecated.van.modeling_van import VanLayerScaling
|
35 |
+
from transformers.utils import logging
|
36 |
+
|
37 |
+
|
38 |
+
logging.set_verbosity_info()
|
39 |
+
logger = logging.get_logger(__name__)
|
40 |
+
|
41 |
+
|
42 |
+
@dataclass
|
43 |
+
class Tracker:
|
44 |
+
module: nn.Module
|
45 |
+
traced: List[nn.Module] = field(default_factory=list)
|
46 |
+
handles: list = field(default_factory=list)
|
47 |
+
|
48 |
+
def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
|
49 |
+
has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
|
50 |
+
if has_not_submodules:
|
51 |
+
if not isinstance(m, VanLayerScaling):
|
52 |
+
self.traced.append(m)
|
53 |
+
|
54 |
+
def __call__(self, x: Tensor):
|
55 |
+
for m in self.module.modules():
|
56 |
+
self.handles.append(m.register_forward_hook(self._forward_hook))
|
57 |
+
self.module(x)
|
58 |
+
[x.remove() for x in self.handles]
|
59 |
+
return self
|
60 |
+
|
61 |
+
@property
|
62 |
+
def parametrized(self):
|
63 |
+
# check the len of the state_dict keys to see if we have learnable params
|
64 |
+
return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
|
65 |
+
|
66 |
+
|
67 |
+
@dataclass
|
68 |
+
class ModuleTransfer:
|
69 |
+
src: nn.Module
|
70 |
+
dest: nn.Module
|
71 |
+
verbose: int = 0
|
72 |
+
src_skip: List = field(default_factory=list)
|
73 |
+
dest_skip: List = field(default_factory=list)
|
74 |
+
|
75 |
+
def __call__(self, x: Tensor):
|
76 |
+
"""
|
77 |
+
Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
|
78 |
+
hood we track all the operations in both modules.
|
79 |
+
"""
|
80 |
+
dest_traced = Tracker(self.dest)(x).parametrized
|
81 |
+
src_traced = Tracker(self.src)(x).parametrized
|
82 |
+
|
83 |
+
src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
|
84 |
+
dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))
|
85 |
+
|
86 |
+
if len(dest_traced) != len(src_traced):
|
87 |
+
raise Exception(
|
88 |
+
f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
|
89 |
+
f" destination module has {len(dest_traced)}."
|
90 |
+
)
|
91 |
+
|
92 |
+
for dest_m, src_m in zip(dest_traced, src_traced):
|
93 |
+
dest_m.load_state_dict(src_m.state_dict())
|
94 |
+
if self.verbose == 1:
|
95 |
+
print(f"Transfered from={src_m} to={dest_m}")
|
96 |
+
|
97 |
+
|
98 |
+
def copy_parameters(from_model: nn.Module, our_model: nn.Module) -> nn.Module:
|
99 |
+
# nn.Parameter cannot be tracked by the Tracker, thus we need to manually convert them
|
100 |
+
from_state_dict = from_model.state_dict()
|
101 |
+
our_state_dict = our_model.state_dict()
|
102 |
+
config = our_model.config
|
103 |
+
all_keys = []
|
104 |
+
for stage_idx in range(len(config.hidden_sizes)):
|
105 |
+
for block_id in range(config.depths[stage_idx]):
|
106 |
+
from_key = f"block{stage_idx + 1}.{block_id}.layer_scale_1"
|
107 |
+
to_key = f"van.encoder.stages.{stage_idx}.layers.{block_id}.attention_scaling.weight"
|
108 |
+
|
109 |
+
all_keys.append((from_key, to_key))
|
110 |
+
from_key = f"block{stage_idx + 1}.{block_id}.layer_scale_2"
|
111 |
+
to_key = f"van.encoder.stages.{stage_idx}.layers.{block_id}.mlp_scaling.weight"
|
112 |
+
|
113 |
+
all_keys.append((from_key, to_key))
|
114 |
+
|
115 |
+
for from_key, to_key in all_keys:
|
116 |
+
our_state_dict[to_key] = from_state_dict.pop(from_key)
|
117 |
+
|
118 |
+
our_model.load_state_dict(our_state_dict)
|
119 |
+
return our_model
|
120 |
+
|
121 |
+
|
122 |
+
def convert_weight_and_push(
|
123 |
+
name: str,
|
124 |
+
config: VanConfig,
|
125 |
+
checkpoint: str,
|
126 |
+
from_model: nn.Module,
|
127 |
+
save_directory: Path,
|
128 |
+
push_to_hub: bool = True,
|
129 |
+
):
|
130 |
+
print(f"Downloading weights for {name}...")
|
131 |
+
checkpoint_path = cached_download(checkpoint)
|
132 |
+
print(f"Converting {name}...")
|
133 |
+
from_state_dict = torch.load(checkpoint_path)["state_dict"]
|
134 |
+
from_model.load_state_dict(from_state_dict)
|
135 |
+
from_model.eval()
|
136 |
+
with torch.no_grad():
|
137 |
+
our_model = VanForImageClassification(config).eval()
|
138 |
+
module_transfer = ModuleTransfer(src=from_model, dest=our_model)
|
139 |
+
x = torch.randn((1, 3, 224, 224))
|
140 |
+
module_transfer(x)
|
141 |
+
our_model = copy_parameters(from_model, our_model)
|
142 |
+
|
143 |
+
if not torch.allclose(from_model(x), our_model(x).logits):
|
144 |
+
raise ValueError("The model logits don't match the original one.")
|
145 |
+
|
146 |
+
checkpoint_name = name
|
147 |
+
print(checkpoint_name)
|
148 |
+
|
149 |
+
if push_to_hub:
|
150 |
+
our_model.push_to_hub(
|
151 |
+
repo_path_or_name=save_directory / checkpoint_name,
|
152 |
+
commit_message="Add model",
|
153 |
+
use_temp_dir=True,
|
154 |
+
)
|
155 |
+
|
156 |
+
# we can use the convnext one
|
157 |
+
image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
|
158 |
+
image_processor.push_to_hub(
|
159 |
+
repo_path_or_name=save_directory / checkpoint_name,
|
160 |
+
commit_message="Add image processor",
|
161 |
+
use_temp_dir=True,
|
162 |
+
)
|
163 |
+
|
164 |
+
print(f"Pushed {checkpoint_name}")
|
165 |
+
|
166 |
+
|
167 |
+
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
|
168 |
+
filename = "imagenet-1k-id2label.json"
|
169 |
+
num_labels = 1000
|
170 |
+
|
171 |
+
repo_id = "huggingface/label-files"
|
172 |
+
num_labels = num_labels
|
173 |
+
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
|
174 |
+
id2label = {int(k): v for k, v in id2label.items()}
|
175 |
+
|
176 |
+
id2label = id2label
|
177 |
+
label2id = {v: k for k, v in id2label.items()}
|
178 |
+
|
179 |
+
ImageNetPreTrainedConfig = partial(VanConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
|
180 |
+
|
181 |
+
names_to_config = {
|
182 |
+
"van-tiny": ImageNetPreTrainedConfig(
|
183 |
+
hidden_sizes=[32, 64, 160, 256],
|
184 |
+
depths=[3, 3, 5, 2],
|
185 |
+
mlp_ratios=[8, 8, 4, 4],
|
186 |
+
),
|
187 |
+
"van-small": ImageNetPreTrainedConfig(
|
188 |
+
hidden_sizes=[64, 128, 320, 512],
|
189 |
+
depths=[2, 2, 4, 2],
|
190 |
+
mlp_ratios=[8, 8, 4, 4],
|
191 |
+
),
|
192 |
+
"van-base": ImageNetPreTrainedConfig(
|
193 |
+
hidden_sizes=[64, 128, 320, 512],
|
194 |
+
depths=[3, 3, 12, 3],
|
195 |
+
mlp_ratios=[8, 8, 4, 4],
|
196 |
+
),
|
197 |
+
"van-large": ImageNetPreTrainedConfig(
|
198 |
+
hidden_sizes=[64, 128, 320, 512],
|
199 |
+
depths=[3, 5, 27, 3],
|
200 |
+
mlp_ratios=[8, 8, 4, 4],
|
201 |
+
),
|
202 |
+
}
|
203 |
+
|
204 |
+
names_to_original_models = {
|
205 |
+
"van-tiny": van_tiny,
|
206 |
+
"van-small": van_small,
|
207 |
+
"van-base": van_base,
|
208 |
+
"van-large": van_large,
|
209 |
+
}
|
210 |
+
|
211 |
+
names_to_original_checkpoints = {
|
212 |
+
"van-tiny": (
|
213 |
+
"https://huggingface.co/Visual-Attention-Network/VAN-Tiny-original/resolve/main/van_tiny_754.pth.tar"
|
214 |
+
),
|
215 |
+
"van-small": (
|
216 |
+
"https://huggingface.co/Visual-Attention-Network/VAN-Small-original/resolve/main/van_small_811.pth.tar"
|
217 |
+
),
|
218 |
+
"van-base": (
|
219 |
+
"https://huggingface.co/Visual-Attention-Network/VAN-Base-original/resolve/main/van_base_828.pth.tar"
|
220 |
+
),
|
221 |
+
"van-large": (
|
222 |
+
"https://huggingface.co/Visual-Attention-Network/VAN-Large-original/resolve/main/van_large_839.pth.tar"
|
223 |
+
),
|
224 |
+
}
|
225 |
+
|
226 |
+
if model_name:
|
227 |
+
convert_weight_and_push(
|
228 |
+
model_name,
|
229 |
+
names_to_config[model_name],
|
230 |
+
checkpoint=names_to_original_checkpoints[model_name],
|
231 |
+
from_model=names_to_original_models[model_name](),
|
232 |
+
save_directory=save_directory,
|
233 |
+
push_to_hub=push_to_hub,
|
234 |
+
)
|
235 |
+
else:
|
236 |
+
for model_name, config in names_to_config.items():
|
237 |
+
convert_weight_and_push(
|
238 |
+
model_name,
|
239 |
+
config,
|
240 |
+
checkpoint=names_to_original_checkpoints[model_name],
|
241 |
+
from_model=names_to_original_models[model_name](),
|
242 |
+
save_directory=save_directory,
|
243 |
+
push_to_hub=push_to_hub,
|
244 |
+
)
|
245 |
+
|
246 |
+
|
247 |
+
if __name__ == "__main__":
|
248 |
+
parser = argparse.ArgumentParser()
|
249 |
+
# Required parameters
|
250 |
+
parser.add_argument(
|
251 |
+
"--model-name",
|
252 |
+
default=None,
|
253 |
+
type=str,
|
254 |
+
help=(
|
255 |
+
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
|
256 |
+
" currently: van-tiny/small/base/large. If `None`, all of them will the converted."
|
257 |
+
),
|
258 |
+
)
|
259 |
+
parser.add_argument(
|
260 |
+
"--pytorch_dump_folder_path",
|
261 |
+
default=None,
|
262 |
+
type=Path,
|
263 |
+
required=True,
|
264 |
+
help="Path to the output PyTorch model directory.",
|
265 |
+
)
|
266 |
+
parser.add_argument(
|
267 |
+
"--van_dir",
|
268 |
+
required=True,
|
269 |
+
type=Path,
|
270 |
+
help=(
|
271 |
+
"A path to VAN's original implementation directory. You can download from here:"
|
272 |
+
" https://github.com/Visual-Attention-Network/VAN-Classification"
|
273 |
+
),
|
274 |
+
)
|
275 |
+
parser.add_argument(
|
276 |
+
"--push_to_hub",
|
277 |
+
default=True,
|
278 |
+
type=bool,
|
279 |
+
required=False,
|
280 |
+
help="If True, push model and image processor to the hub.",
|
281 |
+
)
|
282 |
+
|
283 |
+
args = parser.parse_args()
|
284 |
+
pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
|
285 |
+
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
|
286 |
+
van_dir = args.van_dir
|
287 |
+
# append the parent of the VAN directory to the path so the original implementation can be imported
|
288 |
+
sys.path.append(str(van_dir.parent))
|
289 |
+
from van.models.van import van_base, van_large, van_small, van_tiny
|
290 |
+
|
291 |
+
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
|
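A reduced, self-contained sketch of the forward-hook tracing idea used by `Tracker` and `ModuleTransfer` above: leaf modules are recorded in execution order during one forward pass, then their weights are copied pairwise. The toy modules below are stand-ins, not the VAN checkpoints handled by this script.

```python
import torch
import torch.nn as nn


def trace_leaves(model: nn.Module, x: torch.Tensor):
    traced, handles = [], []

    def hook(module, inputs, outputs):
        if len(list(module.children())) == 0:  # only record leaf modules
            traced.append(module)

    for module in model.modules():
        handles.append(module.register_forward_hook(hook))
    model(x)  # one forward pass fills `traced` in execution order
    for handle in handles:
        handle.remove()
    return traced


src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
dst = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
x = torch.randn(1, 3, 16, 16)

for s, d in zip(trace_leaves(src, x), trace_leaves(dst, x)):
    d.load_state_dict(s.state_dict())  # copy weights between matching leaves
```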