Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/__init__.py +142 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/configuration_blenderbot.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_blenderbot.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_flax_blenderbot.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_tf_blenderbot.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/tokenization_blenderbot.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/tokenization_blenderbot_fast.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/configuration_blenderbot.py +395 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py +114 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_blenderbot.py +1597 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_flax_blenderbot.py +1505 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_tf_blenderbot.py +1556 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/tokenization_blenderbot.py +427 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/tokenization_blenderbot_fast.py +309 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__init__.py +83 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/configuration_clvp.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/convert_clvp_to_hf.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/feature_extraction_clvp.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/modeling_clvp.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/number_normalizer.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/processing_clvp.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/configuration_clvp.py +456 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/convert_clvp_to_hf.py +234 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/feature_extraction_clvp.py +238 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/modeling_clvp.py +2022 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/number_normalizer.py +238 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/processing_clvp.py +91 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/tokenization_clvp.py +364 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__init__.py +61 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/convert_dinov2_to_hf.py +287 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/dit/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/dit/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/dit/__pycache__/convert_dit_unilm_to_pytorch.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/dit/convert_dit_unilm_to_pytorch.py +231 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/falcon/__init__.py +68 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/falcon/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/falcon/__pycache__/configuration_falcon.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/falcon/__pycache__/convert_custom_code_checkpoint.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/falcon/__pycache__/modeling_falcon.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/falcon/configuration_falcon.py +201 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/falcon/convert_custom_code_checkpoint.py +74 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/lilt/__init__.py +60 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/lilt/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/lilt/__pycache__/configuration_lilt.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/lilt/__pycache__/modeling_lilt.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/lilt/configuration_lilt.py +131 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/lilt/modeling_lilt.py +1186 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/__init__.py
ADDED
@@ -0,0 +1,142 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]


try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]


try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
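This `__init__.py` routes everything through `_LazyModule`, so importing the package stays cheap and the heavy torch/TF/Flax backends only load when a matching symbol is first accessed. A minimal sketch of that behavior, assuming a standard installation with PyTorch present:

# Sketch only: demonstrates the lazy-module indirection set up above.
from transformers.models import blenderbot

config_cls = blenderbot.BlenderbotConfig  # resolved via _import_structure; no torch yet
print(config_cls.model_type)              # "blenderbot"
model_cls = blenderbot.BlenderbotModel    # this first access triggers the torch import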
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.93 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/configuration_blenderbot.cpython-310.pyc
ADDED
Binary file (12.8 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
ADDED
Binary file (2.94 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_blenderbot.cpython-310.pyc
ADDED
Binary file (53.4 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_flax_blenderbot.cpython-310.pyc
ADDED
Binary file (41.9 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_tf_blenderbot.cpython-310.pyc
ADDED
Binary file (49.3 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/tokenization_blenderbot.cpython-310.pyc
ADDED
Binary file (16.1 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/tokenization_blenderbot_fast.cpython-310.pyc
ADDED
Binary file (10.9 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/configuration_blenderbot.py
ADDED
@@ -0,0 +1,395 @@
# coding=utf-8
# Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Blenderbot model configuration"""

from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class BlenderbotConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`BlenderbotModel`]. It is used to instantiate a
    Blenderbot model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Blenderbot
    [facebook/blenderbot-3B](https://huggingface.co/facebook/blenderbot-3B) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 8008):
            Vocabulary size of the Blenderbot model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`BlenderbotModel`] or [`TFBlenderbotModel`].
        d_model (`int`, *optional*, defaults to 2560):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 2):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 24):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 10240):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 10240):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        max_position_embeddings (`int`, *optional*, defaults to 128):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by dividing by sqrt(d_model).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*, defaults to 2):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.

    Example:

    ```python
    >>> from transformers import BlenderbotConfig, BlenderbotModel

    >>> # Initializing a Blenderbot facebook/blenderbot-3B style configuration
    >>> configuration = BlenderbotConfig()

    >>> # Initializing a model (with random weights) from the facebook/blenderbot-3B style configuration
    >>> model = BlenderbotModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "blenderbot"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=8008,
        max_position_embeddings=128,
        encoder_layers=2,
        encoder_ffn_dim=10240,
        encoder_attention_heads=32,
        decoder_layers=24,
        decoder_ffn_dim=10240,
        decoder_attention_heads=32,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=2560,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        encoder_no_repeat_ngram_size=3,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                _, num_decoder_layers = self.num_layers
                for i in range(num_decoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            _, num_decoder_layers = self.num_layers

            for _ in range(num_decoder_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            past_key_values_length = seqlen
            _, num_decoder_layers = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_decoder_layers)
            ]
        return common_inputs

    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig._generate_dummy_inputs_for_sequence_classification_and_question_answering
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.generate_dummy_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig._flatten_past_key_values_
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str):
        if direction not in ["inputs", "outputs"]:
            raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')

        name = "past_key_values" if direction == "inputs" else "present"
        _, num_decoder_layers = self.num_layers

        encoder_sequence = "past_encoder_sequence"
        decoder_sequence = "past_decoder_sequence" if direction == "inputs" else "past_decoder_sequence + sequence"

        for i in range(num_decoder_layers):
            inputs_or_outputs[f"{name}.{i}.decoder.key"] = {0: "batch", 2: decoder_sequence}
            inputs_or_outputs[f"{name}.{i}.decoder.value"] = {0: "batch", 2: decoder_sequence}
            inputs_or_outputs[f"{name}.{i}.encoder.key"] = {0: "batch", 2: encoder_sequence}
            inputs_or_outputs[f"{name}.{i}.encoder.value"] = {0: "batch", 2: encoder_sequence}
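`BlenderbotOnnxConfig.generate_dummy_inputs` above dispatches on `task` to build example tensors for export tracing. A short sketch of driving it for the default seq2seq task; the checkpoint name is illustrative, used here only as a tokenizer source:

# Sketch only; assumes PyTorch is installed and the tokenizer can be downloaded.
from transformers import AutoTokenizer, BlenderbotConfig
from transformers.models.blenderbot.configuration_blenderbot import BlenderbotOnnxConfig
from transformers.utils import TensorType

onnx_config = BlenderbotOnnxConfig(BlenderbotConfig(), task="default")
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
dummy = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
)
print(sorted(dummy))  # ['attention_mask', 'decoder_attention_mask', 'decoder_input_ids', 'input_ids']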
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py
ADDED
@@ -0,0 +1,114 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Blenderbot checkpoint."""

import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """
    Copy/paste/tweak a ParlAI checkpoint's weights into our Blenderbot structure.
    """
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
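The converter can also be driven programmatically, mirroring the three argparse flags registered above; the file paths are placeholders for wherever the ParlAI checkpoint and the serialized `BlenderbotConfig` JSON actually live:

# Sketch only; paths are placeholders, not shipped files.
from transformers.models.blenderbot.convert_blenderbot_original_pytorch_checkpoint_to_pytorch import (
    convert_parlai_checkpoint,
)

convert_parlai_checkpoint(
    checkpoint_path="blenderbot-model.bin",        # --src_path
    pytorch_dump_folder_path="hf_blenderbot",      # --save_dir
    config_json_path="blenderbot-3b-config.json",  # --hf_config_json
)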
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_blenderbot.py
ADDED
@@ -0,0 +1,1597 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
""" PyTorch Blenderbot model."""
|
16 |
+
|
17 |
+
|
18 |
+
import copy
|
19 |
+
import math
|
20 |
+
import os
|
21 |
+
import warnings
|
22 |
+
from typing import List, Optional, Tuple, Union
|
23 |
+
|
24 |
+
import torch
|
25 |
+
import torch.utils.checkpoint
|
26 |
+
from torch import nn
|
27 |
+
from torch.nn import CrossEntropyLoss
|
28 |
+
|
29 |
+
from ...activations import ACT2FN
|
30 |
+
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
|
31 |
+
from ...modeling_outputs import (
|
32 |
+
BaseModelOutput,
|
33 |
+
BaseModelOutputWithPastAndCrossAttentions,
|
34 |
+
CausalLMOutputWithCrossAttentions,
|
35 |
+
Seq2SeqLMOutput,
|
36 |
+
Seq2SeqModelOutput,
|
37 |
+
)
|
38 |
+
from ...modeling_utils import PreTrainedModel
|
39 |
+
from ...utils import (
|
40 |
+
add_end_docstrings,
|
41 |
+
add_start_docstrings,
|
42 |
+
add_start_docstrings_to_model_forward,
|
43 |
+
logging,
|
44 |
+
replace_return_docstrings,
|
45 |
+
)
|
46 |
+
from ..blenderbot_small import BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel
|
47 |
+
from .configuration_blenderbot import BlenderbotConfig
|
48 |
+
|
49 |
+
|
50 |
+
logger = logging.get_logger(__name__)
|
51 |
+
|
52 |
+
_CONFIG_FOR_DOC = "BlenderbotConfig"
|
53 |
+
_CHECKPOINT_FOR_DOC = "facebook/blenderbot-400M-distill"
|
54 |
+
|
55 |
+
|
56 |
+
from ..deprecated._archive_maps import BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
|
57 |
+
|
58 |
+
|
59 |
+
# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
|
60 |
+
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
|
61 |
+
"""
|
62 |
+
Shift input ids one token to the right.
|
63 |
+
"""
|
64 |
+
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
|
65 |
+
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
|
66 |
+
shifted_input_ids[:, 0] = decoder_start_token_id
|
67 |
+
|
68 |
+
if pad_token_id is None:
|
69 |
+
raise ValueError("self.model.config.pad_token_id has to be defined.")
|
70 |
+
# replace possible -100 values in labels by `pad_token_id`
|
71 |
+
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
|
72 |
+
|
73 |
+
return shifted_input_ids
|
74 |
+
|
75 |
+
|
76 |
+
class BlenderbotLearnedPositionalEmbedding(nn.Embedding):
|
77 |
+
"""
|
78 |
+
This module learns positional embeddings up to a fixed maximum size.
|
79 |
+
"""
|
80 |
+
|
81 |
+
def __init__(self, num_embeddings: int, embedding_dim: int):
|
82 |
+
super().__init__(num_embeddings, embedding_dim)
|
83 |
+
|
84 |
+
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
|
85 |
+
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
|
86 |
+
bsz, seq_len = input_ids_shape[:2]
|
87 |
+
positions = torch.arange(
|
88 |
+
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
|
89 |
+
)
|
90 |
+
return super().forward(positions)
|
91 |
+
|
92 |
+
|
93 |
+
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Blenderbot
|
94 |
+
class BlenderbotAttention(nn.Module):
|
95 |
+
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
96 |
+
|
97 |
+
def __init__(
|
98 |
+
self,
|
99 |
+
embed_dim: int,
|
100 |
+
num_heads: int,
|
101 |
+
dropout: float = 0.0,
|
102 |
+
is_decoder: bool = False,
|
103 |
+
bias: bool = True,
|
104 |
+
is_causal: bool = False,
|
105 |
+
config: Optional[BlenderbotConfig] = None,
|
106 |
+
):
|
107 |
+
super().__init__()
|
108 |
+
self.embed_dim = embed_dim
|
109 |
+
self.num_heads = num_heads
|
110 |
+
self.dropout = dropout
|
111 |
+
self.head_dim = embed_dim // num_heads
|
112 |
+
self.config = config
|
113 |
+
|
114 |
+
if (self.head_dim * num_heads) != self.embed_dim:
|
115 |
+
raise ValueError(
|
116 |
+
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
|
117 |
+
f" and `num_heads`: {num_heads})."
|
118 |
+
)
|
119 |
+
self.scaling = self.head_dim**-0.5
|
120 |
+
self.is_decoder = is_decoder
|
121 |
+
self.is_causal = is_causal
|
122 |
+
|
123 |
+
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
|
124 |
+
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
|
125 |
+
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
|
126 |
+
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
|
127 |
+
|
128 |
+
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
|
129 |
+
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
|
130 |
+
|
131 |
+
def forward(
|
132 |
+
self,
|
133 |
+
hidden_states: torch.Tensor,
|
134 |
+
key_value_states: Optional[torch.Tensor] = None,
|
135 |
+
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
136 |
+
attention_mask: Optional[torch.Tensor] = None,
|
137 |
+
layer_head_mask: Optional[torch.Tensor] = None,
|
138 |
+
output_attentions: bool = False,
|
139 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
140 |
+
"""Input shape: Batch x Time x Channel"""
|
141 |
+
|
142 |
+
# if key_value_states are provided this layer is used as a cross-attention layer
|
143 |
+
# for the decoder
|
144 |
+
is_cross_attention = key_value_states is not None
|
145 |
+
|
146 |
+
bsz, tgt_len, _ = hidden_states.size()
|
147 |
+
|
148 |
+
# get query proj
|
149 |
+
query_states = self.q_proj(hidden_states) * self.scaling
|
150 |
+
# get key, value proj
|
151 |
+
# `past_key_value[0].shape[2] == key_value_states.shape[1]`
|
152 |
+
# is checking that the `sequence_length` of the `past_key_value` is the same as
|
153 |
+
# the provided `key_value_states` to support prefix tuning
|
154 |
+
if (
|
155 |
+
is_cross_attention
|
156 |
+
and past_key_value is not None
|
157 |
+
and past_key_value[0].shape[2] == key_value_states.shape[1]
|
158 |
+
):
|
159 |
+
# reuse k,v, cross_attentions
|
160 |
+
key_states = past_key_value[0]
|
161 |
+
value_states = past_key_value[1]
|
162 |
+
elif is_cross_attention:
|
163 |
+
# cross_attentions
|
164 |
+
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
|
165 |
+
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
|
166 |
+
elif past_key_value is not None:
|
167 |
+
# reuse k, v, self_attention
|
168 |
+
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
|
169 |
+
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
|
170 |
+
key_states = torch.cat([past_key_value[0], key_states], dim=2)
|
171 |
+
value_states = torch.cat([past_key_value[1], value_states], dim=2)
|
172 |
+
else:
|
173 |
+
# self_attention
|
174 |
+
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
|
175 |
+
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
|
176 |
+
|
177 |
+
if self.is_decoder:
|
178 |
+
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
|
179 |
+
# Further calls to cross_attention layer can then reuse all cross-attention
|
180 |
+
# key/value_states (first "if" case)
|
181 |
+
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
|
182 |
+
# all previous decoder key/value_states. Further calls to uni-directional self-attention
|
183 |
+
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
|
184 |
+
# if encoder bi-directional self-attention `past_key_value` is always `None`
|
185 |
+
past_key_value = (key_states, value_states)
|
186 |
+
|
187 |
+
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
|
188 |
+
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
|
189 |
+
key_states = key_states.reshape(*proj_shape)
|
190 |
+
value_states = value_states.reshape(*proj_shape)
|
191 |
+
|
192 |
+
src_len = key_states.size(1)
|
193 |
+
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
|
194 |
+
|
195 |
+
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
|
196 |
+
raise ValueError(
|
197 |
+
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
|
198 |
+
f" {attn_weights.size()}"
|
199 |
+
)
|
200 |
+
|
201 |
+
if attention_mask is not None:
|
202 |
+
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
|
203 |
+
raise ValueError(
|
204 |
+
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
|
205 |
+
)
|
206 |
+
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
|
207 |
+
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
|
208 |
+
|
209 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
|
210 |
+
|
211 |
+
if layer_head_mask is not None:
|
212 |
+
if layer_head_mask.size() != (self.num_heads,):
|
213 |
+
raise ValueError(
|
214 |
+
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
|
215 |
+
f" {layer_head_mask.size()}"
|
216 |
+
)
|
217 |
+
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
|
218 |
+
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
|
219 |
+
|
220 |
+
if output_attentions:
|
221 |
+
# this operation is a bit awkward, but it's required to
|
222 |
+
# make sure that attn_weights keeps its gradient.
|
223 |
+
# In order to do so, attn_weights have to be reshaped
|
224 |
+
# twice and have to be reused in the following
|
225 |
+
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
|
226 |
+
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
|
227 |
+
else:
|
228 |
+
attn_weights_reshaped = None
|
229 |
+
|
230 |
+
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
|
231 |
+
|
232 |
+
attn_output = torch.bmm(attn_probs, value_states)
|
233 |
+
|
234 |
+
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
|
235 |
+
raise ValueError(
|
236 |
+
f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
|
237 |
+
f" {attn_output.size()}"
|
238 |
+
)
|
239 |
+
|
240 |
+
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
|
241 |
+
attn_output = attn_output.transpose(1, 2)
|
242 |
+
|
243 |
+
# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
|
244 |
+
# partitioned across GPUs when using tensor-parallelism.
|
245 |
+
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
|
246 |
+
|
247 |
+
attn_output = self.out_proj(attn_output)
|
248 |
+
|
249 |
+
return attn_output, attn_weights_reshaped, past_key_value
|
250 |
+
|
251 |
+
|
252 |
+
BLENDERBOT_ATTENTION_CLASSES = {"eager": BlenderbotAttention}
|
253 |
+
|
254 |
+
|
# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Blenderbot, MBART->BLENDERBOT
class BlenderbotEncoderLayer(nn.Module):
    def __init__(self, config: BlenderbotConfig):
        super().__init__()
        self.embed_dim = config.d_model

        self.self_attn = BLENDERBOT_ATTENTION_CLASSES[config._attn_implementation](
            embed_dim=self.embed_dim,
            num_heads=config.encoder_attention_heads,
            dropout=config.attention_dropout,
            config=config,
        )
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        layer_head_mask: torch.Tensor,
        output_attentions: bool = False,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states, attn_weights, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        if hidden_states.dtype == torch.float16 and (
            torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
        ):
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs

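# Note on the layer above: Blenderbot uses a pre-LayerNorm block (normalization applied before
# each sub-layer, residual added after), and the float16 branch clamps activations just below
# torch.finfo(torch.float16).max so that half-precision inference does not overflow to inf/nan.
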
# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Blenderbot, MBART->BLENDERBOT
class BlenderbotDecoderLayer(nn.Module):
    def __init__(self, config: BlenderbotConfig):
        super().__init__()
        self.embed_dim = config.d_model

        self.self_attn = BLENDERBOT_ATTENTION_CLASSES[config._attn_implementation](
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
            is_causal=True,
            config=config,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout

        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.encoder_attn = BLENDERBOT_ATTENTION_CLASSES[config._attn_implementation](
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
            config=config,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(decoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size `(decoder_attention_heads,)`.
            past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)

            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                output_attentions=output_attentions,
            )
            hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
            hidden_states = residual + hidden_states

            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value

        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)

        if use_cache:
            outputs += (present_key_value,)

        return outputs

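# Cache layout for the decoder layer above: each per-layer `past_key_value` is a 4-tuple
# (self_attn_key, self_attn_value, cross_attn_key, cross_attn_value). The self-attention half
# grows by one step per generated token, while the cross-attention half is computed once from
# the encoder output and reused unchanged at every decoding step.
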
class BlenderbotPreTrainedModel(PreTrainedModel):
    config_class = BlenderbotConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        std = self.config.init_std
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    @property
    def dummy_inputs(self):
        pad_token = self.config.pad_token_id
        input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
        dummy_inputs = {
            "attention_mask": input_ids.ne(pad_token),
            "input_ids": input_ids,
            "decoder_input_ids": input_ids,
        }
        return dummy_inputs

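# `_init_weights` above seeds Linear/Embedding weights from N(0, config.init_std); parameters that
# are present in a checkpoint are subsequently overwritten by `from_pretrained`, so this init only
# matters for newly added or otherwise missing weights.
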
BLENDERBOT_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`BlenderbotConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

BLENDERBOT_GENERATION_EXAMPLE = r"""
    Conversation example:

    ```python
    >>> from transformers import AutoTokenizer, BlenderbotForConditionalGeneration

    >>> mname = "facebook/blenderbot-400M-distill"
    >>> model = BlenderbotForConditionalGeneration.from_pretrained(mname)
    >>> tokenizer = AutoTokenizer.from_pretrained(mname)
    >>> UTTERANCE = "My friends are cool but they eat too many carbs."
    >>> print("Human: ", UTTERANCE)
    Human: My friends are cool but they eat too many carbs.

    >>> inputs = tokenizer([UTTERANCE], return_tensors="pt")
    >>> reply_ids = model.generate(**inputs)
    >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
    Bot: That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?

    >>> REPLY = "I'm not sure"
    >>> print("Human: ", REPLY)
    Human: I'm not sure

    >>> NEXT_UTTERANCE = (
    ...     "My friends are cool but they eat too many carbs.</s> <s>That's unfortunate. "
    ...     "Are they trying to lose weight or are they just trying to be healthier?</s> "
    ...     "<s> I'm not sure."
    ... )
    >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="pt")
    >>> next_reply_ids = model.generate(**inputs)
    >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])
    Bot: I see. Well, it's good that they're trying to change their eating habits.
    ```
"""

BLENDERBOT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            Blenderbot uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
            1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
            representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
            input (see `past_key_values`). This is useful if you want more control over how to convert
            `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.

            If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
            of `inputs_embeds`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

class BlenderbotEncoder(BlenderbotPreTrainedModel):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`BlenderbotEncoderLayer`].

    Args:
        config: BlenderbotConfig
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)

        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop

        embed_dim = config.d_model
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_position_embeddings
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0

        if embed_tokens is not None:
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)

        self.embed_positions = BlenderbotLearnedPositionalEmbedding(
            config.max_position_embeddings,
            embed_dim,
        )
        self.layers = nn.ModuleList([BlenderbotEncoderLayer(config) for _ in range(config.encoder_layers)])
        self.layer_norm = nn.LayerNorm(config.d_model)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale

        embed_pos = self.embed_positions(input_shape)

        hidden_states = inputs_embeds + embed_pos
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        # expand attention_mask
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            if head_mask.size()[0] != len(self.layers):
                raise ValueError(
                    f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
                    f" {head_mask.size()[0]}."
                )
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            to_drop = False
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:  # skip the layer
                    to_drop = True

            if to_drop:
                layer_outputs = (None, None)
            else:
                if self.gradient_checkpointing and self.training:
                    layer_outputs = self._gradient_checkpointing_func(
                        encoder_layer.__call__,
                        hidden_states,
                        attention_mask,
                        (head_mask[idx] if head_mask is not None else None),
                        output_attentions,
                    )
                else:
                    layer_outputs = encoder_layer(
                        hidden_states,
                        attention_mask,
                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                        output_attentions=output_attentions,
                    )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        # add final layer norm
        hidden_states = self.layer_norm(hidden_states)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )

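# LayerDrop in the encoder above (https://arxiv.org/abs/1909.11556) skips an entire layer with
# probability `config.encoder_layerdrop`, and only during training; at inference every layer runs,
# so encoder outputs stay deterministic.
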
class BlenderbotDecoder(BlenderbotPreTrainedModel):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`BlenderbotDecoderLayer`]

    Args:
        config: BlenderbotConfig
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_position_embeddings
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0

        if embed_tokens is not None:
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)

        self.embed_positions = BlenderbotLearnedPositionalEmbedding(
            config.max_position_embeddings,
            config.d_model,
        )
        self.layers = nn.ModuleList([BlenderbotDecoderLayer(config) for _ in range(config.decoder_layers)])
        self.layer_norm = nn.LayerNorm(config.d_model)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        head_mask=None,
        cross_attn_head_mask=None,
        past_key_values=None,
        inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0,
                1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
                cross-attention on hidden heads. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale

        attention_mask = _prepare_4d_causal_attention_mask(
            attention_mask, input_shape, inputs_embeds, past_key_values_length
        )

        # expand encoder attention mask
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            encoder_attention_mask = _prepare_4d_attention_mask(
                encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
            )

        # embed positions
        positions = self.embed_positions(input_shape, past_key_values_length)

        hidden_states = inputs_embeds + positions

        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False
        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
        next_decoder_cache = () if use_cache else None

        # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
        for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
            if attn_mask is not None:
                if attn_mask.size()[0] != len(self.layers):
                    raise ValueError(
                        f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
                        f" {attn_mask.size()[0]}."
                    )
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:
                    continue

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    head_mask[idx] if head_mask is not None else None,
                    cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
                    None,
                    output_attentions,
                    use_cache,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                    cross_attn_layer_head_mask=(
                        cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
                    ),
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )
            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)

        # add final layer norm
        hidden_states = self.layer_norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )

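# Rough sketch of incremental decoding with the decoder above (shapes illustrative): on the first
# step, input_ids is (bsz, prompt_len) and past_key_values is None; on every later step only the
# newest token id (bsz, 1) is fed, and `past_key_values_length` shifts the learned position
# embeddings so that token n is still embedded at position n.
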
@add_start_docstrings(
    "The bare Blenderbot Model outputting raw hidden-states without any specific head on top.",
    BLENDERBOT_START_DOCSTRING,
)
class BlenderbotModel(BlenderbotPreTrainedModel):
    _tied_weights_keys = ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight"]

    def __init__(self, config: BlenderbotConfig):
        super().__init__(config)

        padding_idx, vocab_size = config.pad_token_id, config.vocab_size
        self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)

        self.encoder = BlenderbotEncoder(config, self.shared)
        self.decoder = BlenderbotDecoder(config, self.shared)

        # Initialize weights and apply final processing
        self.post_init()

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
        if pretrained_model_name_or_path == "facebook/blenderbot-90M":
            warnings.warn(
                "The checkpoint `facebook/blenderbot-90M` is deprecated. In the future, please use the identical"
                " checkpoint `facebook/small_blenderbot-90M` with"
                " `BlenderbotSmallModel.from_pretrained('facebook/small_blenderbot-90M')` instead.",
                FutureWarning,
            )
            return BlenderbotSmallModel.from_pretrained(pretrained_model_name_or_path)

        return super(BlenderbotModel, cls).from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)

    def get_input_embeddings(self):
        return self.shared

    def set_input_embeddings(self, value):
        self.shared = value
        self.encoder.embed_tokens = self.shared
        self.decoder.embed_tokens = self.shared

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
        r"""
        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, BlenderbotModel

        >>> model = BlenderbotModel.from_pretrained("facebook/blenderbot-400M-distill")
        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")

        >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
        >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids  # Batch size 1
        >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_input_ids)

        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 6, 1280]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )

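# The encoder and decoder above share a single `nn.Embedding` (`self.shared`); `_tied_weights_keys`
# tells the checkpoint loader that `encoder.embed_tokens.weight` and `decoder.embed_tokens.weight`
# alias the same tensor, so it is stored (and loaded) only once.
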
@add_start_docstrings(
    "The Blenderbot Model with a language modeling head. Can be used for summarization.", BLENDERBOT_START_DOCSTRING
)
class BlenderbotForConditionalGeneration(BlenderbotPreTrainedModel):
    base_model_prefix = "model"
    _keys_to_ignore_on_load_missing = ["final_logits_bias"]
    _tied_weights_keys = ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "lm_head.weight"]

    def __init__(self, config: BlenderbotConfig):
        super().__init__(config)
        self.model = BlenderbotModel(config)
        self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
        self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
        if pretrained_model_name_or_path == "facebook/blenderbot-90M":
            warnings.warn(
                "The checkpoint `facebook/blenderbot-90M` is deprecated. In the future, please use the identical"
                " checkpoint `facebook/small_blenderbot-90M` with"
                " `BlenderbotSmallForConditionalGeneration.from_pretrained('facebook/small_blenderbot-90M')` instead.",
                FutureWarning,
            )
            return BlenderbotSmallForConditionalGeneration.from_pretrained(pretrained_model_name_or_path)

        return super(BlenderbotForConditionalGeneration, cls).from_pretrained(
            pretrained_model_name_or_path, *model_args, **kwargs
        )

    def get_encoder(self):
        return self.model.get_encoder()

    def get_decoder(self):
        return self.model.get_decoder()

    def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
        new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
        self._resize_final_logits_bias(new_embeddings.weight.shape[0])
        return new_embeddings

    def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
        old_num_tokens = self.final_logits_bias.shape[-1]
        if new_num_tokens <= old_num_tokens:
            new_bias = self.final_logits_bias[:, :new_num_tokens]
        else:
            extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
            new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
        self.register_buffer("final_logits_bias", new_bias)

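    # `final_logits_bias` is a buffer rather than a parameter, so it is saved with the model but
    # receives no gradient; `_resize_final_logits_bias` above keeps it aligned with the vocabulary
    # after `resize_token_embeddings`, zero-filling any newly added token slots.
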
    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    @add_end_docstrings(BLENDERBOT_GENERATION_EXAMPLE)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is not None:
            if use_cache:
                logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
            use_cache = False
            if decoder_input_ids is None and decoder_inputs_embeds is None:
                decoder_input_ids = shift_tokens_right(
                    labels, self.config.pad_token_id, self.config.decoder_start_token_id
                )

        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return Seq2SeqLMOutput(
            loss=masked_lm_loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )

    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past_key_values=None,
        attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs,
    ):
        # cut decoder_input_ids if past is used
        if past_key_values is not None:
            past_length = past_key_values[0][0].shape[2]

            # Some generation methods already pass only the last input ID
            if decoder_input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                # Default to old behavior: keep only final ID
                remove_prefix_length = decoder_input_ids.shape[1] - 1

            decoder_input_ids = decoder_input_ids[:, remove_prefix_length:]

        return {
            "input_ids": None,  # encoder_outputs is defined. input_ids not needed
            "encoder_outputs": encoder_outputs,
            "past_key_values": past_key_values,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,  # change this to avoid caching (presumably for debugging)
        }

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            # cached cross_attention states don't have to be reordered -> they are always the same
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
                + layer_past[2:],
            )
        return reordered_past

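# Beam-search bookkeeping for the model above: `_reorder_cache` gathers each layer's cached
# self-attention key/value states along the batch axis with `beam_idx` (e.g. beam_idx=[1, 1, 0]
# duplicates beam 1 and drops beam 2 -- illustrative indices), while the cross-attention states
# in layer_past[2:] are identical for every beam of a sample and are kept as-is.
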
# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->Blenderbot
class BlenderbotDecoderWrapper(BlenderbotPreTrainedModel):
    """
    This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
    used in combination with the [`EncoderDecoderModel`] framework.
    """

    def __init__(self, config):
        super().__init__(config)
        self.decoder = BlenderbotDecoder(config)

    def forward(self, *args, **kwargs):
        return self.decoder(*args, **kwargs)

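# A minimal decoder-only construction sketch (hypothetical snippet, not part of this file):
#
#     config = BlenderbotConfig(is_decoder=True, is_encoder_decoder=False)
#     wrapper = BlenderbotDecoderWrapper(config)  # forward() simply delegates to BlenderbotDecoder
#
# The wrapper only exists so that standalone-decoder checkpoints keyed under a `decoder.` prefix
# load correctly, e.g. inside `BlenderbotForCausalLM` below.
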
# Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->Blenderbot, facebook/bart-base->facebook/blenderbot-400M-distill
class BlenderbotForCausalLM(BlenderbotPreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        config = copy.deepcopy(config)
        config.is_decoder = True
        config.is_encoder_decoder = False
        super().__init__(config)
        self.model = BlenderbotDecoderWrapper(config)

        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.decoder.embed_tokens

    def set_input_embeddings(self, value):
        self.model.decoder.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model.decoder = decoder

    def get_decoder(self):
        return self.model.decoder

    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                if the model is configured as a decoder.
            encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
                in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
1474 |
+
shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
|
1475 |
+
tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
|
1476 |
+
|
1477 |
+
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
|
1478 |
+
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
|
1479 |
+
|
1480 |
+
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
|
1481 |
+
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
|
1482 |
+
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
|
1483 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
1484 |
+
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
1485 |
+
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
1486 |
+
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
1487 |
+
use_cache (`bool`, *optional*):
|
1488 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
|
1489 |
+
(see `past_key_values`).
|
1490 |
+
|
1491 |
+
- 1 for tokens that are **not masked**,
|
1492 |
+
- 0 for tokens that are **masked**.
|
1493 |
+
output_attentions (`bool`, *optional*):
|
1494 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
1495 |
+
returned tensors for more detail.
|
1496 |
+
output_hidden_states (`bool`, *optional*):
|
1497 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
|
1498 |
+
for more detail.
|
1499 |
+
return_dict (`bool`, *optional*):
|
1500 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
1501 |
+
|
1502 |
+
Returns:
|
1503 |
+
|
1504 |
+
Example:
|
1505 |
+
|
1506 |
+
```python
|
1507 |
+
>>> from transformers import AutoTokenizer, BlenderbotForCausalLM
|
1508 |
+
|
1509 |
+
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
|
1510 |
+
>>> model = BlenderbotForCausalLM.from_pretrained("facebook/blenderbot-400M-distill", add_cross_attention=False)
|
1511 |
+
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
|
1512 |
+
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
|
1513 |
+
>>> outputs = model(**inputs)
|
1514 |
+
|
1515 |
+
>>> logits = outputs.logits
|
1516 |
+
>>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
|
1517 |
+
>>> list(logits.shape) == expected_shape
|
1518 |
+
True
|
1519 |
+
```"""
|
1520 |
+
|
1521 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
1522 |
+
output_hidden_states = (
|
1523 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1524 |
+
)
|
1525 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1526 |
+
|
1527 |
+
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
|
1528 |
+
outputs = self.model.decoder(
|
1529 |
+
input_ids=input_ids,
|
1530 |
+
attention_mask=attention_mask,
|
1531 |
+
encoder_hidden_states=encoder_hidden_states,
|
1532 |
+
encoder_attention_mask=encoder_attention_mask,
|
1533 |
+
head_mask=head_mask,
|
1534 |
+
cross_attn_head_mask=cross_attn_head_mask,
|
1535 |
+
past_key_values=past_key_values,
|
1536 |
+
inputs_embeds=inputs_embeds,
|
1537 |
+
use_cache=use_cache,
|
1538 |
+
output_attentions=output_attentions,
|
1539 |
+
output_hidden_states=output_hidden_states,
|
1540 |
+
return_dict=return_dict,
|
1541 |
+
)
|
1542 |
+
|
1543 |
+
logits = self.lm_head(outputs[0])
|
1544 |
+
|
1545 |
+
loss = None
|
1546 |
+
if labels is not None:
|
1547 |
+
labels = labels.to(logits.device)
|
1548 |
+
loss_fct = CrossEntropyLoss()
|
1549 |
+
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
|
1550 |
+
|
1551 |
+
if not return_dict:
|
1552 |
+
output = (logits,) + outputs[1:]
|
1553 |
+
return (loss,) + output if loss is not None else output
|
1554 |
+
|
1555 |
+
return CausalLMOutputWithCrossAttentions(
|
1556 |
+
loss=loss,
|
1557 |
+
logits=logits,
|
1558 |
+
past_key_values=outputs.past_key_values,
|
1559 |
+
hidden_states=outputs.hidden_states,
|
1560 |
+
attentions=outputs.attentions,
|
1561 |
+
cross_attentions=outputs.cross_attentions,
|
1562 |
+
)
|
1563 |
+
|
1564 |
+
def prepare_inputs_for_generation(
|
1565 |
+
self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
|
1566 |
+
):
|
1567 |
+
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
|
1568 |
+
if attention_mask is None:
|
1569 |
+
attention_mask = input_ids.new_ones(input_ids.shape)
|
1570 |
+
|
1571 |
+
if past_key_values:
|
1572 |
+
past_length = past_key_values[0][0].shape[2]
|
1573 |
+
|
1574 |
+
# Some generation methods already pass only the last input ID
|
1575 |
+
if input_ids.shape[1] > past_length:
|
1576 |
+
remove_prefix_length = past_length
|
1577 |
+
else:
|
1578 |
+
# Default to old behavior: keep only final ID
|
1579 |
+
remove_prefix_length = input_ids.shape[1] - 1
|
1580 |
+
|
1581 |
+
input_ids = input_ids[:, remove_prefix_length:]
|
1582 |
+
# first step, decoder_cached_states are empty
|
1583 |
+
return {
|
1584 |
+
"input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
|
1585 |
+
"attention_mask": attention_mask,
|
1586 |
+
"past_key_values": past_key_values,
|
1587 |
+
"use_cache": use_cache,
|
1588 |
+
}
|
1589 |
+
|
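    # Editor's note: an illustrative sketch (not in the original file) of the cache-trimming
    # logic above. With a cache holding `past_length` positions, only the unseen suffix of
    # `input_ids` is forwarded on the next step:
    #
    #     input_ids = torch.tensor([[10, 11, 12, 13]])  # 4 tokens so far
    #     past_length = 3                               # 3 already cached
    #     input_ids[:, past_length:]                    # tensor([[13]]) is all the model needs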
    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past
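    # Editor's note: unlike the encoder-decoder variant above, this decoder-only
    # `_reorder_cache` reorders every cached tensor in `layer_past`; there are no
    # cross-attention states to leave untouched.
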
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_flax_blenderbot.py
ADDED
@@ -0,0 +1,1505 @@
# coding=utf-8
# Copyright 2021 The Fairseq Authors and The Google Flax Team Authors And The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Flax Blenderbot model."""

import math
import random
from functools import partial
from typing import Callable, Optional, Tuple

import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.linen import combine_masks, make_causal_mask
from flax.linen.attention import dot_product_attention_weights
from flax.traverse_util import flatten_dict, unflatten_dict
from jax import lax
from jax.random import PRNGKey

from ...modeling_flax_outputs import (
    FlaxBaseModelOutput,
    FlaxBaseModelOutputWithPastAndCrossAttentions,
    FlaxCausalLMOutputWithCrossAttentions,
    FlaxSeq2SeqLMOutput,
    FlaxSeq2SeqModelOutput,
)
from ...modeling_flax_utils import (
    ACT2FN,
    FlaxPreTrainedModel,
    append_call_sample_docstring,
    append_replace_return_docstrings,
    overwrite_call_docstring,
)
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_blenderbot import BlenderbotConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "BlenderbotConfig"
_CHECKPOINT_FOR_DOC = "facebook/blenderbot-400M-distill"


BLENDERBOT_START_DOCSTRING = r"""
    This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a Flax Linen
    [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
    regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.

    Finally, this model supports inherent JAX features such as:

    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)

    Parameters:
        config ([`BlenderbotConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
"""

BLENDERBOT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            For translation and summarization training, `decoder_input_ids` should be provided. If no
            `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
            for denoising pre-training following the paper.
        decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.

            If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the
            paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
        position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.
        decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
            range `[0, config.max_position_embeddings - 1]`.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


BLENDERBOT_ENCODE_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

BLENDERBOT_DECODE_INPUTS_DOCSTRING = r"""
    Args:
        decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            For translation and summarization training, `decoder_input_ids` should be provided. If no
            `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
            for denoising pre-training following the paper.
        encoder_outputs (`tuple(tuple(jnp.ndarray))`):
            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
        encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.

            If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the
            paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
        decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
            range `[0, config.max_position_embeddings - 1]`.
        past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
            Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
            auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
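
# Editor's note: a minimal usage sketch (not part of the original file) of the encode/decode
# split documented above. The `encode`, `decode`, and `init_cache` methods are assumed to live
# on the pretrained-model class defined further down in this module:
#
#     encoder_outputs = model.encode(input_ids=input_ids, attention_mask=attention_mask)
#     outputs = model.decode(decoder_input_ids, encoder_outputs)
#     hidden = outputs.last_hidden_state
#
# For step-by-step generation, `init_cache` produces the `past_key_values` dictionary that
# `decode` then threads from one step to the next.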


# Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """
    Shift input ids one token to the right.
    """
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
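
# Editor's note: a worked example (not part of the original file). With pad_token_id=0 and
# decoder_start_token_id=1:
#
#     shift_tokens_right(jnp.array([[5, 6, 2]]), 0, 1)  # -> [[1, 5, 6]]
#
# The last token is dropped, the start token is prepended, and any -100 label sentinels in
# the shifted result are replaced by the pad token.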


# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention with Bart->Blenderbot
class FlaxBlenderbotAttention(nn.Module):
    config: BlenderbotConfig
    embed_dim: int
    num_heads: int
    dropout: float = 0.0
    causal: bool = False
    bias: bool = True
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self) -> None:
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {self.num_heads})."
            )

        dense = partial(
            nn.Dense,
            self.embed_dim,
            use_bias=self.bias,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.init_std),
        )

        self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
        self.out_proj = dense()

        self.dropout_layer = nn.Dropout(rate=self.dropout)

        if self.causal:
            self.causal_mask = make_causal_mask(
                jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
            )

    def _split_heads(self, hidden_states):
        return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))

    def _merge_heads(self, hidden_states):
        return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))

    @nn.compact
    def _concatenate_to_cache(self, key, value, query, attention_mask):
        """
        This function takes projected key, value states from a single input token and concatenates the states to cached
        states from previous steps. This function is slightly adapted from the official Flax repository:
        https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
        """
        # detect if we're initializing by absence of existing cache data.
        is_initialized = self.has_variable("cache", "cached_key")
        cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
        cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
        cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))

        if is_initialized:
            *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
            # update key, value caches with our new 1d spatial slices
            cur_index = cache_index.value
            indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
            key = lax.dynamic_update_slice(cached_key.value, key, indices)
            value = lax.dynamic_update_slice(cached_value.value, value, indices)
            cached_key.value = key
            cached_value.value = value
            num_updated_cache_vectors = query.shape[1]
            cache_index.value = cache_index.value + num_updated_cache_vectors
            # causal mask for cached decoder self-attention: our single query position should only attend to those
            # key positions that have already been generated and cached, not the remaining zero elements.
            pad_mask = jnp.broadcast_to(
                jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
                tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
            )
            attention_mask = combine_masks(pad_mask, attention_mask)
        return key, value, attention_mask

    def __call__(
        self,
        hidden_states: jnp.ndarray,
        key_value_states: Optional[jnp.ndarray] = None,
        attention_mask: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        deterministic: bool = True,
    ) -> Tuple[jnp.ndarray]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        batch_size = hidden_states.shape[0]

        # get query proj
        query_states = self.q_proj(hidden_states)
        # get key, value proj
        if is_cross_attention:
            # cross_attentions
            key_states = self.k_proj(key_value_states)
            value_states = self.v_proj(key_value_states)
        else:
            # self_attention
            key_states = self.k_proj(hidden_states)
            value_states = self.v_proj(hidden_states)

        query_states = self._split_heads(query_states)
        key_states = self._split_heads(key_states)
        value_states = self._split_heads(value_states)

        # handle cache prepare causal attention mask
        if self.causal:
            query_length, key_length = query_states.shape[1], key_states.shape[1]
            if self.has_variable("cache", "cached_key"):
                mask_shift = self.variables["cache"]["cache_index"]
                max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
                causal_mask = lax.dynamic_slice(
                    self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
                )
            else:
                causal_mask = self.causal_mask[:, :, :query_length, :key_length]
            causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])

        # combine masks if needed
        if attention_mask is not None and self.causal:
            attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
            attention_mask = combine_masks(attention_mask, causal_mask)
        elif self.causal:
            attention_mask = causal_mask
        elif attention_mask is not None:
            attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))

        # During fast autoregressive decoding, we feed one position at a time,
        # and cache the keys and values step by step.
        if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
            key_states, value_states, attention_mask = self._concatenate_to_cache(
                key_states, value_states, query_states, attention_mask
            )

        # Convert the boolean attention mask to an attention bias.
        if attention_mask is not None:
            # attention mask in the form of attention bias
            attention_bias = lax.select(
                attention_mask > 0,
                jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
                jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
            )
        else:
            attention_bias = None

        dropout_rng = None
        if not deterministic and self.dropout > 0.0:
            dropout_rng = self.make_rng("dropout")

        attn_weights = dot_product_attention_weights(
            query_states,
            key_states,
            bias=attention_bias,
            dropout_rng=dropout_rng,
            dropout_rate=self.dropout,
            broadcast_dropout=True,
            deterministic=deterministic,
            dtype=self.dtype,
            precision=None,
        )

        attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
        attn_output = self._merge_heads(attn_output)
        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights
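
    # Editor's note: an illustrative shape walkthrough (not in the original file) for the
    # attention above, assuming batch=2, seq=8, embed_dim=1280, num_heads=32 (head_dim=40),
    # which matches facebook/blenderbot-400M-distill:
    #
    #     q/k/v projections: (2, 8, 1280) -> _split_heads -> (2, 8, 32, 40)
    #     attn_weights:      (2, 32, 8, 8) from dot_product_attention_weights
    #     attn_output:       (2, 8, 32, 40) -> _merge_heads -> (2, 8, 1280)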


# Copied from transformers.models.mbart.modeling_flax_mbart.FlaxMBartEncoderLayer with MBart->Blenderbot
class FlaxBlenderbotEncoderLayer(nn.Module):
    config: BlenderbotConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.embed_dim = self.config.d_model
        self.self_attn = FlaxBlenderbotAttention(
            config=self.config,
            embed_dim=self.embed_dim,
            num_heads=self.config.encoder_attention_heads,
            dropout=self.config.attention_dropout,
            dtype=self.dtype,
        )
        self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
        self.dropout_layer = nn.Dropout(rate=self.config.dropout)
        self.activation_fn = ACT2FN[self.config.activation_function]
        self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
        self.fc1 = nn.Dense(
            self.config.encoder_ffn_dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.init_std),
        )
        self.fc2 = nn.Dense(
            self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
        )
        self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)

    def __call__(
        self,
        hidden_states: jnp.ndarray,
        attention_mask: jnp.ndarray,
        output_attentions: bool = True,
        deterministic: bool = True,
    ) -> Tuple[jnp.ndarray]:
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask)
        hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
        hidden_states = self.fc2(hidden_states)
        hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayerCollection with Bart->Blenderbot
class FlaxBlenderbotEncoderLayerCollection(nn.Module):
    config: BlenderbotConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.layers = [
            FlaxBlenderbotEncoderLayer(self.config, name=str(i), dtype=self.dtype)
            for i in range(self.config.encoder_layers)
        ]
        self.layerdrop = self.config.encoder_layerdrop

    def __call__(
        self,
        hidden_states,
        attention_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        all_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None

        for encoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if not deterministic and (dropout_probability < self.layerdrop):  # skip the layer
                layer_outputs = (None, None)
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    output_attentions,
                    deterministic,
                )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        outputs = (hidden_states, all_hidden_states, all_attentions)

        if not return_dict:
            return tuple(v for v in outputs if v is not None)

        return FlaxBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )


# Copied from transformers.models.mbart.modeling_flax_mbart.FlaxMBartDecoderLayer with MBart->Blenderbot
class FlaxBlenderbotDecoderLayer(nn.Module):
    config: BlenderbotConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.embed_dim = self.config.d_model
        self.self_attn = FlaxBlenderbotAttention(
            config=self.config,
            embed_dim=self.embed_dim,
            num_heads=self.config.decoder_attention_heads,
            dropout=self.config.attention_dropout,
            causal=True,
            dtype=self.dtype,
        )
        self.dropout_layer = nn.Dropout(rate=self.config.dropout)
        self.activation_fn = ACT2FN[self.config.activation_function]
        self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)

        self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
        self.encoder_attn = FlaxBlenderbotAttention(
            config=self.config,
            embed_dim=self.embed_dim,
            num_heads=self.config.decoder_attention_heads,
            dropout=self.config.attention_dropout,
            dtype=self.dtype,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
        self.fc1 = nn.Dense(
            self.config.decoder_ffn_dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.init_std),
        )
        self.fc2 = nn.Dense(
            self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
        )
        self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)

    def __call__(
        self,
        hidden_states: jnp.ndarray,
        attention_mask: jnp.ndarray,
        encoder_hidden_states: Optional[jnp.ndarray] = None,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        output_attentions: bool = True,
        deterministic: bool = True,
    ) -> Tuple[jnp.ndarray]:
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache
        )
        hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
        hidden_states = residual + hidden_states

        # Cross-Attention Block
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states

            hidden_states = self.encoder_attn_layer_norm(hidden_states)
            hidden_states, cross_attn_weights = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
            )
            hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
            hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
        hidden_states = self.fc2(hidden_states)
        hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)

        return outputs


# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayerCollection with Bart->Blenderbot
class FlaxBlenderbotDecoderLayerCollection(nn.Module):
    config: BlenderbotConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.layers = [
            FlaxBlenderbotDecoderLayer(self.config, name=str(i), dtype=self.dtype)
            for i in range(self.config.decoder_layers)
        ]
        self.layerdrop = self.config.decoder_layerdrop

    def __call__(
        self,
        hidden_states,
        attention_mask,
        encoder_hidden_states: Optional[jnp.ndarray] = None,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        deterministic: bool = True,
        init_cache: bool = False,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if not deterministic and (dropout_probability < self.layerdrop):
                layer_outputs = (None, None, None)
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    init_cache=init_cache,
                    output_attentions=output_attentions,
                    deterministic=deterministic,
                )

            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attns += (layer_outputs[1],)

                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions]

        if not return_dict:
            return tuple(v for v in outputs if v is not None)

        return FlaxBaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )


class FlaxBlenderbotEncoder(nn.Module):
    config: BlenderbotConfig
    embed_tokens: nn.Embed
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dropout_layer = nn.Dropout(rate=self.config.dropout)

        embed_dim = self.config.d_model
        self.padding_idx = self.config.pad_token_id
        self.max_source_positions = self.config.max_position_embeddings
        self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0

        self.embed_positions = nn.Embed(
            self.config.max_position_embeddings,
            embed_dim,
            embedding_init=jax.nn.initializers.normal(self.config.init_std),
        )
        self.layers = FlaxBlenderbotEncoderLayerCollection(self.config, self.dtype)
        self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)

    def __call__(
        self,
        input_ids,
        attention_mask,
        position_ids,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        deterministic: bool = True,
    ):
        input_shape = input_ids.shape
        input_ids = input_ids.reshape(-1, input_shape[-1])

        inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale

        embed_pos = self.embed_positions(position_ids)

        hidden_states = inputs_embeds + embed_pos
        hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)

        outputs = self.layers(
            hidden_states,
            attention_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        last_hidden_states = outputs[0]
        last_hidden_states = self.layer_norm(last_hidden_states)

        # update the last element in `hidden_states` after applying `layernorm` above
        hidden_states = None
        if output_hidden_states:
            hidden_states = outputs[1]
            hidden_states = hidden_states[:-1] + (last_hidden_states,)

        if not return_dict:
            outputs = (last_hidden_states, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:])
            return tuple(v for v in outputs if v is not None)

        return FlaxBaseModelOutput(
            last_hidden_state=last_hidden_states,
            hidden_states=hidden_states,
            attentions=outputs.attentions,
        )


class FlaxBlenderbotDecoder(nn.Module):
    config: BlenderbotConfig
    embed_tokens: nn.Embed
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dropout_layer = nn.Dropout(rate=self.config.dropout)

        embed_dim = self.config.d_model
        self.padding_idx = self.config.pad_token_id
        self.max_target_positions = self.config.max_position_embeddings
        self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0

        self.embed_positions = nn.Embed(
            self.config.max_position_embeddings,
            embed_dim,
            embedding_init=jax.nn.initializers.normal(self.config.init_std),
        )

        self.layers = FlaxBlenderbotDecoderLayerCollection(self.config, self.dtype)
        self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)

    def __call__(
        self,
        input_ids,
        attention_mask,
        position_ids,
        encoder_hidden_states: Optional[jnp.ndarray] = None,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        deterministic: bool = True,
    ):
        input_shape = input_ids.shape
        input_ids = input_ids.reshape(-1, input_shape[-1])

        inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale

        # embed positions
        positions = self.embed_positions(position_ids)

        hidden_states = inputs_embeds + positions
        hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)

        outputs = self.layers(
            hidden_states,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            deterministic=deterministic,
            init_cache=init_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_states = outputs[0]
        last_hidden_states = self.layer_norm(last_hidden_states)

        # update the last element in `hidden_states` after applying `layernorm` above
        hidden_states = None
        if output_hidden_states:
            hidden_states = outputs[1]
            hidden_states = hidden_states[:-1] + (last_hidden_states,)

        if not return_dict:
            outputs = (last_hidden_states, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:])
            return tuple(v for v in outputs if v is not None)

        return FlaxBaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=last_hidden_states,
            hidden_states=hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )


# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartModule with Bart->Blenderbot
class FlaxBlenderbotModule(nn.Module):
    config: BlenderbotConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.shared = nn.Embed(
            self.config.vocab_size,
            self.config.d_model,
            embedding_init=jax.nn.initializers.normal(self.config.init_std),
            dtype=self.dtype,
        )

        self.encoder = FlaxBlenderbotEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
        self.decoder = FlaxBlenderbotDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared)

    def _get_encoder_module(self):
        return self.encoder

    def _get_decoder_module(self):
        return self.decoder

    def __call__(
        self,
        input_ids,
        attention_mask,
        decoder_input_ids,
        decoder_attention_mask,
        position_ids,
        decoder_position_ids,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        deterministic: bool = True,
    ):
        encoder_outputs = self.encoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=deterministic,
        )

        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            position_ids=decoder_position_ids,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=deterministic,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return FlaxSeq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )


class FlaxBlenderbotPreTrainedModel(FlaxPreTrainedModel):
    config_class = BlenderbotConfig
    base_model_prefix: str = "model"
    module_class: nn.Module = None

    def __init__(
        self,
        config: BlenderbotConfig,
        input_shape: Tuple[int] = (1, 1),
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensors
        input_ids = jnp.zeros(input_shape, dtype="i4")
        # make sure initialization pass will work for FlaxBlenderbotForSequenceClassificationModule
        input_ids = input_ids.at[(..., -1)].set(self.config.eos_token_id)
        attention_mask = jnp.ones_like(input_ids)
        decoder_input_ids = input_ids
        decoder_attention_mask = jnp.ones_like(input_ids)

        batch_size, sequence_length = input_ids.shape
        position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
        decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(
            rngs,
            input_ids,
            attention_mask,
            decoder_input_ids,
            decoder_attention_mask,
            position_ids,
            decoder_position_ids,
        )["params"]

        if params is not None:
            random_params = flatten_dict(unfreeze(random_params))
            params = flatten_dict(unfreeze(params))
            for missing_key in self._missing_keys:
                params[missing_key] = random_params[missing_key]
            self._missing_keys = set()
            return freeze(unflatten_dict(params))
        else:
            return random_params

    def init_cache(self, batch_size, max_length, encoder_outputs):
        r"""
        Args:
            batch_size (`int`):
                batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
            max_length (`int`):
                maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
                cache.
            encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray))]`):
                `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
                `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*)
                is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
                cross-attention of the decoder.
        """
        # init input variables to retrieve cache
        decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
        decoder_attention_mask = jnp.ones_like(decoder_input_ids)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
        )

        def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
            decoder_module = module._get_decoder_module()
            return decoder_module(
                decoder_input_ids,
                decoder_attention_mask,
                decoder_position_ids,
                **kwargs,
            )

        init_variables = self.module.init(
            jax.random.PRNGKey(0),
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            decoder_position_ids=decoder_position_ids,
            encoder_hidden_states=encoder_outputs[0],
            init_cache=True,
            method=_decoder_forward,  # we only need to call the decoder to init the cache
        )
        return unfreeze(init_variables["cache"])
971 |
+
|
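    # How `init_cache` fits into fast auto-regressive decoding, as a sketch (it
    # assumes `model` and `inputs` as in the `encode` example further below):
    #
    #     >>> encoder_outputs = model.encode(**inputs)
    #     >>> past_key_values = model.init_cache(
    #     ...     batch_size=inputs.input_ids.shape[0], max_length=32, encoder_outputs=encoder_outputs
    #     ... )
    #     >>> # `past_key_values` now holds zero-initialized key/value buffers of length
    #     >>> # `max_length` for every decoder self- and cross-attention layer; `decode`
    #     >>> # fills them in (functionally, via Flax's mutable "cache" collection).
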
    @add_start_docstrings(BLENDERBOT_ENCODE_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=BlenderbotConfig)
    def encode(
        self,
        input_ids: jnp.ndarray,
        attention_mask: Optional[jnp.ndarray] = None,
        position_ids: Optional[jnp.ndarray] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        train: bool = False,
        params: dict = None,
        dropout_rng: PRNGKey = None,
    ):
        r"""
        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration

        >>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")

        >>> text = "My friends are cool but they eat too many carbs."
        >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
        >>> encoder_outputs = model.encode(**inputs)
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        if attention_mask is None:
            attention_mask = jnp.ones_like(input_ids)
        if position_ids is None:
            batch_size, sequence_length = input_ids.shape
            position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))

        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
            encode_module = module._get_encoder_module()
            return encode_module(input_ids, attention_mask, position_ids, **kwargs)

        return self.module.apply(
            {"params": params or self.params},
            input_ids=jnp.array(input_ids, dtype="i4"),
            attention_mask=jnp.array(attention_mask, dtype="i4"),
            position_ids=jnp.array(position_ids, dtype="i4"),
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=not train,
            rngs=rngs,
            method=_encoder_forward,
        )

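    # Since `encode` is a pure function of its array arguments, it can be wrapped
    # in `jax.jit` when called repeatedly (a sketch, reusing `model` and `inputs`
    # from the example above):
    #
    #     >>> import jax
    #     >>> encode_jit = jax.jit(lambda ids, mask: model.encode(input_ids=ids, attention_mask=mask))
    #     >>> encoder_outputs = encode_jit(inputs.input_ids, inputs.attention_mask)
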
    @add_start_docstrings(BLENDERBOT_DECODE_INPUTS_DOCSTRING)
    @replace_return_docstrings(
        output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=BlenderbotConfig
    )
    def decode(
        self,
        decoder_input_ids,
        encoder_outputs,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        decoder_attention_mask: Optional[jnp.ndarray] = None,
        decoder_position_ids: Optional[jnp.ndarray] = None,
        past_key_values: dict = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        train: bool = False,
        params: dict = None,
        dropout_rng: PRNGKey = None,
    ):
        r"""
        Returns:

        Example:

        ```python
        >>> import jax.numpy as jnp
        >>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration

        >>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")

        >>> text = "My friends are cool but they eat too many carbs."
        >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
        >>> encoder_outputs = model.encode(**inputs)

        >>> decoder_start_token_id = model.config.decoder_start_token_id
        >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id

        >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
        >>> last_decoder_hidden_states = outputs.last_hidden_state
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        encoder_hidden_states = encoder_outputs[0]
        if encoder_attention_mask is None:
            batch_size, sequence_length = encoder_hidden_states.shape[:2]
            encoder_attention_mask = jnp.ones((batch_size, sequence_length))

        batch_size, sequence_length = decoder_input_ids.shape
        if decoder_attention_mask is None:
            decoder_attention_mask = jnp.ones((batch_size, sequence_length))

        if decoder_position_ids is None:
            if past_key_values is not None:
                raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")

            decoder_position_ids = jnp.broadcast_to(
                jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
            )

        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        inputs = {"params": params or self.params}

        # If past_key_values are passed, the cache is already initialized and the private flag init_cache has to
        # be passed down to ensure the cache is used. The cache must also be marked as mutable so that it can be
        # changed by the FlaxBlenderbotAttention module.
        if past_key_values:
            inputs["cache"] = past_key_values
            mutable = ["cache"]
        else:
            mutable = False

        def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
            decoder_module = module._get_decoder_module()
            return decoder_module(
                decoder_input_ids,
                decoder_attention_mask,
                decoder_position_ids,
                **kwargs,
            )

        outputs = self.module.apply(
            inputs,
            decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
            decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
            decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=not train,
            rngs=rngs,
            mutable=mutable,
            method=_decoder_forward,
        )

        # add updated cache to model output
        if past_key_values is not None and return_dict:
            outputs, past = outputs
            outputs["past_key_values"] = unfreeze(past["cache"])
            return outputs
        elif past_key_values is not None and not return_dict:
            outputs, past = outputs
            outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]

        return outputs

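    # The incremental pattern this method enables, sketched: pass the cache from
    # `init_cache` once, then feed one token at a time with explicit
    # `decoder_position_ids` (a real loop, like `generate`, would also pass an
    # extended `decoder_attention_mask` and pick the next token from a head's logits):
    #
    #     >>> import jax.numpy as jnp
    #     >>> token = jnp.full((1, 1), model.config.decoder_start_token_id, dtype="i4")
    #     >>> for step in range(3):
    #     ...     out = model.decode(
    #     ...         token,
    #     ...         encoder_outputs,
    #     ...         decoder_position_ids=jnp.array([[step]], dtype="i4"),
    #     ...         past_key_values=past_key_values,
    #     ...     )
    #     ...     past_key_values = out.past_key_values  # the updated cache
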
    @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING)
    def __call__(
        self,
        input_ids: jnp.ndarray,
        attention_mask: Optional[jnp.ndarray] = None,
        decoder_input_ids: Optional[jnp.ndarray] = None,
        decoder_attention_mask: Optional[jnp.ndarray] = None,
        position_ids: Optional[jnp.ndarray] = None,
        decoder_position_ids: Optional[jnp.ndarray] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        train: bool = False,
        params: dict = None,
        dropout_rng: PRNGKey = None,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        # prepare encoder inputs
        if attention_mask is None:
            attention_mask = jnp.ones_like(input_ids)
        if position_ids is None:
            batch_size, sequence_length = input_ids.shape
            position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))

        # prepare decoder inputs
        if decoder_input_ids is None:
            decoder_input_ids = shift_tokens_right(
                input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id
            )
        if decoder_attention_mask is None:
            decoder_attention_mask = jnp.ones_like(decoder_input_ids)
        if decoder_position_ids is None:
            batch_size, sequence_length = decoder_input_ids.shape
            decoder_position_ids = jnp.broadcast_to(
                jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
            )

        # Handle any PRNG if needed
        rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}

        return self.module.apply(
            {"params": params or self.params},
            input_ids=jnp.array(input_ids, dtype="i4"),
            attention_mask=jnp.array(attention_mask, dtype="i4"),
            position_ids=jnp.array(position_ids, dtype="i4"),
            decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
            decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
            decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=not train,
            rngs=rngs,
        )


@add_start_docstrings(
    "The bare Blenderbot Model transformer outputting raw hidden-states without any specific head on top.",
    BLENDERBOT_START_DOCSTRING,
)
class FlaxBlenderbotModel(FlaxBlenderbotPreTrainedModel):
    config: BlenderbotConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    module_class = FlaxBlenderbotModule


append_call_sample_docstring(FlaxBlenderbotModel, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC)

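# A minimal forward-pass sketch for the bare model; when `decoder_input_ids` is
# omitted, `__call__` derives it from `input_ids` via `shift_tokens_right`:
#
#     >>> from transformers import AutoTokenizer, FlaxBlenderbotModel
#     >>> model = FlaxBlenderbotModel.from_pretrained("facebook/blenderbot-400M-distill")
#     >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
#     >>> inputs = tokenizer("Hello there!", return_tensors="jax")
#     >>> outputs = model(**inputs)
#     >>> outputs.last_hidden_state.shape  # (batch_size, target_length, d_model)
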
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule with Bart->Blenderbot
class FlaxBlenderbotForConditionalGenerationModule(nn.Module):
    config: BlenderbotConfig
    dtype: jnp.dtype = jnp.float32
    bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros

    def setup(self):
        self.model = FlaxBlenderbotModule(config=self.config, dtype=self.dtype)
        self.lm_head = nn.Dense(
            self.model.shared.num_embeddings,
            use_bias=False,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.init_std),
        )
        self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings))

    def _get_encoder_module(self):
        return self.model.encoder

    def _get_decoder_module(self):
        return self.model.decoder

    def __call__(
        self,
        input_ids,
        attention_mask,
        decoder_input_ids,
        decoder_attention_mask,
        position_ids,
        decoder_position_ids,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        deterministic: bool = True,
    ):
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            position_ids=position_ids,
            decoder_position_ids=decoder_position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=deterministic,
        )

        hidden_states = outputs[0]

        if self.config.tie_word_embeddings:
            shared_embedding = self.model.variables["params"]["shared"]["embedding"]
            lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
        else:
            lm_logits = self.lm_head(hidden_states)

        lm_logits += jax.lax.stop_gradient(self.final_logits_bias.astype(self.dtype))

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return output

        return FlaxSeq2SeqLMOutput(
            logits=lm_logits,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )

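# When `config.tie_word_embeddings` is set, the LM head above owns no kernel of
# its own: the shared token-embedding matrix is applied transposed, keeping input
# embeddings and output projection tied. The same arithmetic in isolation
# (illustrative shapes only):
#
#     >>> import jax.numpy as jnp
#     >>> embedding = jnp.ones((128, 16))   # (vocab_size, d_model)
#     >>> hidden = jnp.ones((1, 4, 16))     # (batch, seq, d_model)
#     >>> logits = hidden @ embedding.T     # same math as lm_head.apply with kernel=embedding.T
#     >>> logits.shape
#     (1, 4, 128)
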
@add_start_docstrings(
    "The Blenderbot Model with a language modeling head. Can be used for summarization.", BLENDERBOT_START_DOCSTRING
)
class FlaxBlenderbotForConditionalGeneration(FlaxBlenderbotPreTrainedModel):
    module_class = FlaxBlenderbotForConditionalGenerationModule
    dtype: jnp.dtype = jnp.float32

    @add_start_docstrings(BLENDERBOT_DECODE_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=BlenderbotConfig)
    def decode(
        self,
        decoder_input_ids,
        encoder_outputs,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        decoder_attention_mask: Optional[jnp.ndarray] = None,
        decoder_position_ids: Optional[jnp.ndarray] = None,
        past_key_values: dict = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        train: bool = False,
        params: dict = None,
        dropout_rng: PRNGKey = None,
    ):
        r"""
        Returns:

        Example:

        ```python
        >>> import jax.numpy as jnp
        >>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration

        >>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")

        >>> text = "My friends are cool but they eat too many carbs."
        >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
        >>> encoder_outputs = model.encode(**inputs)

        >>> decoder_start_token_id = model.config.decoder_start_token_id
        >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id

        >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
        >>> logits = outputs.logits
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        encoder_hidden_states = encoder_outputs[0]
        if encoder_attention_mask is None:
            batch_size, sequence_length = encoder_hidden_states.shape[:2]
            encoder_attention_mask = jnp.ones((batch_size, sequence_length))

        batch_size, sequence_length = decoder_input_ids.shape
        if decoder_attention_mask is None:
            decoder_attention_mask = jnp.ones((batch_size, sequence_length))

        if decoder_position_ids is None:
            if past_key_values is not None:
                raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")

            decoder_position_ids = jnp.broadcast_to(
                jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
            )

        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        inputs = {"params": params or self.params}

        # If past_key_values are passed, the cache is already initialized and the private flag init_cache has to
        # be passed down to ensure the cache is used. The cache must also be marked as mutable so that it can be
        # changed by the FlaxBlenderbotAttention module.
        if past_key_values:
            inputs["cache"] = past_key_values
            mutable = ["cache"]
        else:
            mutable = False

        def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
            decoder_module = module._get_decoder_module()
            outputs = decoder_module(
                decoder_input_ids,
                decoder_attention_mask,
                decoder_position_ids,
                **kwargs,
            )
            hidden_states = outputs[0]

            if self.config.tie_word_embeddings:
                shared_embedding = module.model.variables["params"]["shared"]["embedding"]
                lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
            else:
                lm_logits = module.lm_head(hidden_states)

            lm_logits += module.final_logits_bias
            return lm_logits, outputs

        outputs = self.module.apply(
            inputs,
            decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
            decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
            decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=not train,
            rngs=rngs,
            mutable=mutable,
            method=_decoder_forward,
        )

        if past_key_values is None:
            lm_logits, decoder_outputs = outputs
        else:
            (lm_logits, decoder_outputs), past = outputs

        if return_dict:
            outputs = FlaxCausalLMOutputWithCrossAttentions(
                logits=lm_logits,
                hidden_states=decoder_outputs.hidden_states,
                attentions=decoder_outputs.attentions,
                cross_attentions=decoder_outputs.cross_attentions,
            )
        else:
            outputs = (lm_logits,) + decoder_outputs[1:]

        # add updated cache to model output
        if past_key_values is not None and return_dict:
            outputs["past_key_values"] = unfreeze(past["cache"])
            return outputs
        elif past_key_values is not None and not return_dict:
            outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]

        return outputs

    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        max_length,
        attention_mask: Optional[jax.Array] = None,
        decoder_attention_mask: Optional[jax.Array] = None,
        encoder_outputs=None,
        **kwargs,
    ):
        # initializing the cache
        batch_size, seq_length = decoder_input_ids.shape

        past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
        # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
        # But since the decoder uses a causal mask, those positions are masked anyway.
        # Thus, we can create a single static attention_mask here, which is more efficient for compilation.
        extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
        if decoder_attention_mask is not None:
            position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
            extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
        else:
            position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))

        return {
            "past_key_values": past_key_values,
            "encoder_outputs": encoder_outputs,
            "encoder_attention_mask": attention_mask,
            "decoder_attention_mask": extended_attention_mask,
            "decoder_position_ids": position_ids,
        }

    def update_inputs_for_generation(self, model_outputs, model_kwargs):
        model_kwargs["past_key_values"] = model_outputs.past_key_values
        model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
        return model_kwargs

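# The two helpers above drive cached generation: `prepare_inputs_for_generation`
# allocates the cache and a static, `max_length`-sized attention mask once, so
# every decoding step keeps the same shapes for XLA, and
# `update_inputs_for_generation` merely advances the position counter. A sketch
# of the prepared kwargs (reusing names from the earlier sketches):
#
#     >>> kwargs = model.prepare_inputs_for_generation(
#     ...     decoder_input_ids, max_length=32,
#     ...     attention_mask=inputs.attention_mask, encoder_outputs=encoder_outputs,
#     ... )
#     >>> sorted(kwargs)
#     ['decoder_attention_mask', 'decoder_position_ids', 'encoder_attention_mask', 'encoder_outputs', 'past_key_values']
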
FLAX_BLENDERBOT_CONDITIONAL_GENERATION_DOCSTRING = r"""
    Returns:

    Conversation example:

    ```py
    >>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration

    >>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
    >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")

    >>> UTTERANCE = "My friends are cool but they eat too many carbs."
    >>> inputs = tokenizer([UTTERANCE], max_length=1024, return_tensors="np")

    >>> # Generate Reply
    >>> reply_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=5, early_stopping=True).sequences
    >>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in reply_ids])
    ```
"""

overwrite_call_docstring(
    FlaxBlenderbotForConditionalGeneration,
    BLENDERBOT_INPUTS_DOCSTRING + FLAX_BLENDERBOT_CONDITIONAL_GENERATION_DOCSTRING,
)
append_replace_return_docstrings(
    FlaxBlenderbotForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC
)
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_tf_blenderbot.py
ADDED
@@ -0,0 +1,1556 @@
# coding=utf-8
# Copyright 2021 The Facebook, Inc and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF 2.0 Blenderbot model."""


from __future__ import annotations

import os
import random
import warnings
from typing import List, Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import (
    TFBaseModelOutput,
    TFBaseModelOutputWithPastAndCrossAttentions,
    TFSeq2SeqLMOutput,
    TFSeq2SeqModelOutput,
)

# Public API
from ...modeling_tf_utils import (
    TFCausalLanguageModelingLoss,
    TFPreTrainedModel,
    keras,
    keras_serializable,
    unpack_inputs,
)
from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
from ...utils import (
    add_code_sample_docstrings,
    add_end_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_blenderbot import BlenderbotConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "facebook/blenderbot-400M-distill"
_CONFIG_FOR_DOC = "BlenderbotConfig"


LARGE_NEGATIVE = -1e8


# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right
def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
    pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
    decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
    start_tokens = tf.fill(
        (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)
    )
    shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids = tf.where(
        shifted_input_ids == -100,
        tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)),
        shifted_input_ids,
    )

    # Verify that `labels` has only positive values and -100
    assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))

    # Make sure the assertion op is called by wrapping the result in an identity no-op
    with tf.control_dependencies([assert_gte0]):
        shifted_input_ids = tf.identity(shifted_input_ids)

    return shifted_input_ids

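# A small numeric sketch of the shift (illustrative ids; pad_token_id=0,
# decoder_start_token_id=2; note the -100 label marker becoming the pad id):
#
#     >>> labels = tf.constant([[5, 6, -100, 7]])
#     >>> shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=2)
#     <tf.Tensor: shape=(1, 4), dtype=int32, numpy=array([[2, 5, 6, 0]], dtype=int32)>
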
# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
    """
    Make the causal mask used for uni-directional (decoder) self-attention.
    """
    bsz = input_ids_shape[0]
    tgt_len = input_ids_shape[1]
    mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
    mask_cond = tf.range(shape_list(mask)[-1])

    mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)

    if past_key_values_length > 0:
        mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)

    return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))

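# The resulting additive mask lets position i attend to positions j <= i and
# blocks the future with LARGE_NEGATIVE; with `past_key_values_length=k`, k zero
# columns are prepended so cached positions stay visible. For a length-3 target
# (values shown approximately):
#
#     >>> _make_causal_mask(tf.TensorShape([1, 3]))[0, 0].numpy()
#     array([[ 0.e+00, -1.e+08, -1.e+08],
#            [ 0.e+00,  0.e+00, -1.e+08],
#            [ 0.e+00,  0.e+00,  0.e+00]], dtype=float32)
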
# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    src_len = shape_list(mask)[1]
    tgt_len = tgt_len if tgt_len is not None else src_len
    one_cst = tf.constant(1.0)
    mask = tf.cast(mask, dtype=one_cst.dtype)
    expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))

    return (one_cst - expanded_mask) * LARGE_NEGATIVE

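# Padding masks use the inverse convention: 1 -> 0.0 (attend) and 0 ->
# LARGE_NEGATIVE (ignore), broadcast over every target position. For one padded
# source token (values shown approximately):
#
#     >>> m = _expand_mask(tf.constant([[1, 1, 0]]))
#     >>> shape_list(m)
#     [1, 1, 3, 3]
#     >>> m[0, 0, 0].numpy()
#     array([ 0.e+00,  0.e+00, -1.e+08], dtype=float32)
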
class TFBlenderbotLearnedPositionalEmbedding(keras.layers.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs):
        super().__init__(num_embeddings, embedding_dim, **kwargs)

    def call(
        self, input_shape: tf.TensorShape, past_key_values_length: int = 0, position_ids: tf.Tensor | None = None
    ):
        """Input is expected to be of size [bsz x seqlen]."""
        if position_ids is None:
            seq_len = input_shape[1]
            position_ids = tf.range(seq_len, delta=1, name="range")
            position_ids += past_key_values_length

        return super().call(tf.cast(position_ids, dtype=tf.int32))

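# During cached decoding only the newest token is embedded, so the lookup is
# offset by the number of already-cached positions. A sketch (sizes illustrative):
#
#     >>> pos_emb = TFBlenderbotLearnedPositionalEmbedding(128, 16)
#     >>> prompt = pos_emb(tf.TensorShape([1, 4]))                          # positions 0..3
#     >>> step = pos_emb(tf.TensorShape([1, 1]), past_key_values_length=4)  # position 4
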
# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->Blenderbot
class TFBlenderbotAttention(keras.layers.Layer):
    """Multi-headed attention from "Attention Is All You Need"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim

        self.num_heads = num_heads
        self.dropout = keras.layers.Dropout(dropout)
        self.head_dim = embed_dim // num_heads
        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder

        self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
        self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
        self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
        self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")

    def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
        return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))

    def call(
        self,
        hidden_states: tf.Tensor,
        key_value_states: tf.Tensor | None = None,
        past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
        attention_mask: tf.Tensor | None = None,
        layer_head_mask: tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Tuple[tf.Tensor, tf.Tensor | None]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        bsz, tgt_len, embed_dim = shape_list(hidden_states)

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = tf.concat([past_key_value[0], key_states], axis=2)
            value_states = tf.concat([past_key_value[1], value_states], axis=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
        key_states = tf.reshape(key_states, proj_shape)
        value_states = tf.reshape(value_states, proj_shape)

        src_len = shape_list(key_states)[1]
        attn_weights = tf.matmul(query_states, key_states, transpose_b=True)

        tf.debugging.assert_equal(
            shape_list(attn_weights),
            [bsz * self.num_heads, tgt_len, src_len],
            message=(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {shape_list(attn_weights)}"
            ),
        )

        if attention_mask is not None:
            tf.debugging.assert_equal(
                shape_list(attention_mask),
                [bsz, 1, tgt_len, src_len],
                message=(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
                    f" {shape_list(attention_mask)}"
                ),
            )

            attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
            attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
            attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))

        attn_weights = stable_softmax(attn_weights, axis=-1)

        if layer_head_mask is not None:
            tf.debugging.assert_equal(
                shape_list(layer_head_mask),
                [self.num_heads],
                message=(
                    f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
                    f" {shape_list(layer_head_mask)}"
                ),
            )

            attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
                attn_weights, (bsz, self.num_heads, tgt_len, src_len)
            )
            attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))

        attn_probs = self.dropout(attn_weights, training=training)
        attn_output = tf.matmul(attn_probs, value_states)

        tf.debugging.assert_equal(
            shape_list(attn_output),
            [bsz * self.num_heads, tgt_len, self.head_dim],
            message=(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {shape_list(attn_output)}"
            ),
        )

        attn_output = tf.transpose(
            tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
        )
        attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))

        attn_output = self.out_proj(attn_output)
        attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))

        return attn_output, attn_weights, past_key_value

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "k_proj", None) is not None:
            with tf.name_scope(self.k_proj.name):
                self.k_proj.build([None, None, self.embed_dim])
        if getattr(self, "q_proj", None) is not None:
            with tf.name_scope(self.q_proj.name):
                self.q_proj.build([None, None, self.embed_dim])
        if getattr(self, "v_proj", None) is not None:
            with tf.name_scope(self.v_proj.name):
                self.v_proj.build([None, None, self.embed_dim])
        if getattr(self, "out_proj", None) is not None:
            with tf.name_scope(self.out_proj.name):
                self.out_proj.build([None, None, self.embed_dim])

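# Shape flow through the layer, for reference (B=batch, T=target length,
# S=source length, H=num_heads, D=head_dim, E=embed_dim=H*D):
#
#     hidden_states (B, T, E) --q/k/v_proj + _shape--> (B*H, T or S, D)
#     attn_weights  = Q @ K^T                        -> (B*H, T, S)
#     attn_output   = softmax(attn_weights) @ V      -> (B*H, T, D) --merge--> (B, T, E)
#
# The returned `past_key_value` carries the (B, H, S, D) key/value tensors so
# later decoder steps can reuse them instead of re-projecting.
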
# Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartEncoderLayer with MBart->Blenderbot
class TFBlenderbotEncoderLayer(keras.layers.Layer):
    def __init__(self, config: BlenderbotConfig, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = config.d_model
        self.self_attn = TFBlenderbotAttention(
            self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
        )
        self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
        self.dropout = keras.layers.Dropout(config.dropout)
        self.activation_fn = get_tf_activation(config.activation_function)
        self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
        self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
        self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
        self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
        self.config = config

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor,
        layer_head_mask: tf.Tensor,
        training: Optional[bool] = False,
    ):
        """
        Args:
            hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
            attention_mask (`tf.Tensor`): attention mask of size
                *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
            layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
                *(encoder_attention_heads,)*
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states, self_attn_weights, _ = self.self_attn(
            hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask
        )

        tf.debugging.assert_equal(
            shape_list(hidden_states),
            shape_list(residual),
            message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
        )

        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = self.activation_dropout(hidden_states, training=training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states

        return hidden_states, self_attn_weights

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "self_attn", None) is not None:
            with tf.name_scope(self.self_attn.name):
                self.self_attn.build(None)
        if getattr(self, "self_attn_layer_norm", None) is not None:
            with tf.name_scope(self.self_attn_layer_norm.name):
                self.self_attn_layer_norm.build([None, None, self.embed_dim])
        if getattr(self, "fc1", None) is not None:
            with tf.name_scope(self.fc1.name):
                self.fc1.build([None, None, self.embed_dim])
        if getattr(self, "fc2", None) is not None:
            with tf.name_scope(self.fc2.name):
                self.fc2.build([None, None, self.config.encoder_ffn_dim])
        if getattr(self, "final_layer_norm", None) is not None:
            with tf.name_scope(self.final_layer_norm.name):
                self.final_layer_norm.build([None, None, self.embed_dim])

# Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartDecoderLayer with MBart->Blenderbot
class TFBlenderbotDecoderLayer(keras.layers.Layer):
    def __init__(self, config: BlenderbotConfig, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = config.d_model
        self.self_attn = TFBlenderbotAttention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            name="self_attn",
            is_decoder=True,
        )
        self.dropout = keras.layers.Dropout(config.dropout)
        self.activation_fn = get_tf_activation(config.activation_function)
        self.activation_dropout = keras.layers.Dropout(config.activation_dropout)

        self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
        self.encoder_attn = TFBlenderbotAttention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            name="encoder_attn",
            is_decoder=True,
        )
        self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
        self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
        self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
        self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
        self.config = config

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor | None = None,
        encoder_hidden_states: tf.Tensor | None = None,
        encoder_attention_mask: tf.Tensor | None = None,
        layer_head_mask: tf.Tensor | None = None,
        cross_attn_layer_head_mask: tf.Tensor | None = None,
        past_key_value: Tuple[tf.Tensor] | None = None,
        training: Optional[bool] = False,
    ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
        """
        Args:
            hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
            attention_mask (`tf.Tensor`): attention mask of size
                *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
            encoder_hidden_states (`tf.Tensor`):
                cross attention input to the layer of shape *(batch, seq_len, embed_dim)*
            encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
                *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
            layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
                *(decoder_attention_heads,)*
            cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
                *(decoder_attention_heads,)*
            past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
        )
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states

        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)

            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
            )
            hidden_states = self.dropout(hidden_states, training=training)
            hidden_states = residual + hidden_states

            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value

        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = self.activation_dropout(hidden_states, training=training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states

        return (
            hidden_states,
            self_attn_weights,
            cross_attn_weights,
            present_key_value,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "self_attn", None) is not None:
            with tf.name_scope(self.self_attn.name):
                self.self_attn.build(None)
        if getattr(self, "self_attn_layer_norm", None) is not None:
            with tf.name_scope(self.self_attn_layer_norm.name):
                self.self_attn_layer_norm.build([None, None, self.embed_dim])
        if getattr(self, "encoder_attn", None) is not None:
            with tf.name_scope(self.encoder_attn.name):
                self.encoder_attn.build(None)
        if getattr(self, "encoder_attn_layer_norm", None) is not None:
            with tf.name_scope(self.encoder_attn_layer_norm.name):
                self.encoder_attn_layer_norm.build([None, None, self.embed_dim])
        if getattr(self, "fc1", None) is not None:
            with tf.name_scope(self.fc1.name):
                self.fc1.build([None, None, self.embed_dim])
        if getattr(self, "fc2", None) is not None:
            with tf.name_scope(self.fc2.name):
                self.fc2.build([None, None, self.config.decoder_ffn_dim])
        if getattr(self, "final_layer_norm", None) is not None:
            with tf.name_scope(self.final_layer_norm.name):
                self.final_layer_norm.build([None, None, self.embed_dim])

527 |
+
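The slicing above relies on the per-layer cache being a flat 4-tuple: the self-attention key/value pair in the first two slots and the cross-attention pair in the last two. A minimal sketch of that layout (the tensor shapes here are illustrative, not taken from this file):

```python
import tensorflow as tf

# Hypothetical shapes: (batch, num_heads, cached_len, head_dim)
self_k = tf.zeros((2, 32, 5, 40))
self_v = tf.zeros((2, 32, 5, 40))
cross_k = tf.zeros((2, 32, 7, 40))  # cached_len = encoder sequence length
cross_v = tf.zeros((2, 32, 7, 40))

past_key_value = (self_k, self_v, cross_k, cross_v)
self_attn_cache = past_key_value[:2]    # what self.self_attn receives
cross_attn_cache = past_key_value[-2:]  # what self.encoder_attn receives
assert self_attn_cache[0] is self_k and cross_attn_cache[1] is cross_v
```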
class TFBlenderbotPreTrainedModel(TFPreTrainedModel):
    config_class = BlenderbotConfig
    base_model_prefix = "model"


BLENDERBOT_START_DOCSTRING = r"""
    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
    behavior.

    <Tip>

    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
    positional argument:

    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
      `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
      `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`

    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!

    </Tip>

    Args:
        config ([`BlenderbotConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

BLENDERBOT_GENERATION_EXAMPLE = r"""
    Conversation example:

    ```py
    >>> from transformers import AutoTokenizer, TFBlenderbotForConditionalGeneration

    >>> mname = "facebook/blenderbot-400M-distill"
    >>> model = TFBlenderbotForConditionalGeneration.from_pretrained(mname)
    >>> tokenizer = AutoTokenizer.from_pretrained(mname)
    >>> UTTERANCE = "My friends are cool but they eat too many carbs."
    >>> print("Human: ", UTTERANCE)

    >>> inputs = tokenizer([UTTERANCE], return_tensors="tf")
    >>> reply_ids = model.generate(**inputs)
    >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])

    >>> REPLY = "I'm not sure"
    >>> print("Human: ", REPLY)
    >>> NEXT_UTTERANCE = (
    ...     "My friends are cool but they eat too many carbs.</s> <s>That's unfortunate. "
    ...     "Are they trying to lose weight or are they just trying to be healthier?</s> "
    ...     "<s> I'm not sure."
    ... )
    >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="tf")
    >>> next_reply_ids = model.generate(**inputs)
    >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])
    ```
"""
BLENDERBOT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`tf.Tensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            Blenderbot uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
        decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.
        decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each decoder input sequence token in the position embeddings. Selected in the
            range `[0, config.max_position_embeddings - 1]`.
        head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        encoder_outputs (`tf.FloatTensor`, *optional*):
            Sequence of hidden states at the output of the last layer of the encoder, of shape `(batch_size,
            sequence_length, hidden_size)`. Used in the cross-attention of the decoder.
        past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
            decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape `(batch_size, 1)`
            instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*, defaults to `True`):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`). Set to `False` during training, `True` during generation.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can be used only in eager mode; in graph mode the value in the
            config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode; in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
            eager mode; in graph mode the value will always be set to `True`.
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""
@keras_serializable
class TFBlenderbotEncoder(keras.layers.Layer):
    config_class = BlenderbotConfig
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`TFBlenderbotEncoderLayer`].

    Args:
        config: BlenderbotConfig
    """

    def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.dropout = keras.layers.Dropout(config.dropout)
        self.layerdrop = config.encoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_position_embeddings
        self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0

        self.embed_tokens = embed_tokens
        self.embed_positions = TFBlenderbotLearnedPositionalEmbedding(
            config.max_position_embeddings,
            config.d_model,
            name="embed_positions",
        )
        self.layers = [TFBlenderbotEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
        self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")

    def get_embed_tokens(self):
        return self.embed_tokens

    def set_embed_tokens(self, embed_tokens):
        self.embed_tokens = embed_tokens

    @unpack_inputs
    def call(
        self,
        input_ids=None,
        inputs_embeds=None,
        attention_mask=None,
        head_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
    ):
        """
        Args:
            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated
                vectors than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail. This argument can be used only in eager mode; in graph mode the
                value in the config will be used instead.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail. This argument can be used only in eager mode; in graph mode the value in the config
                will be used instead.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
                in eager mode; in graph mode the value will always be set to `True`.
            training (`bool`, *optional*, defaults to `False`):
                Whether or not to use the model in training mode (some modules like dropout modules have different
                behaviors between training and evaluation).
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale

        embed_pos = self.embed_positions(input_shape)
        hidden_states = inputs_embeds + embed_pos
        hidden_states = self.dropout(hidden_states, training=training)

        # check attention mask and invert
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _expand_mask(attention_mask)
        else:
            attention_mask = None

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            tf.debugging.assert_equal(
                shape_list(head_mask)[0],
                len(self.layers),
                message=(
                    f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
                    f" {shape_list(head_mask)[0]}."
                ),
            )

        # encoder layers
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if training and (dropout_probability < self.layerdrop):  # skip the layer
                continue

            hidden_states, attn = encoder_layer(
                hidden_states,
                attention_mask,
                head_mask[idx] if head_mask is not None else None,
            )

            if output_attentions:
                all_attentions += (attn,)

        hidden_states = self.layer_norm(hidden_states)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return TFBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "embed_positions", None) is not None:
            with tf.name_scope(self.embed_positions.name):
                self.embed_positions.build(None)
        if getattr(self, "layer_norm", None) is not None:
            with tf.name_scope(self.layer_norm.name):
                self.layer_norm.build([None, None, self.config.d_model])
        if getattr(self, "layers", None) is not None:
            for layer in self.layers:
                with tf.name_scope(layer.name):
                    layer.build(None)
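`_expand_mask` (defined earlier in this file) converts the user-facing `[batch, seq_len]` 0/1 mask into the additive `[batch, 1, tgt_len, src_len]` form the attention layers consume. A standalone sketch of the same idea (an illustrative re-implementation, not the file's helper):

```python
import tensorflow as tf

LARGE_NEGATIVE = -1e9  # stands in for the library's masking constant

def expand_mask_sketch(mask, tgt_len=None):
    # mask: [bsz, src_len], 1 = attend, 0 = padding.
    src_len = tf.shape(mask)[1]
    tgt_len = tgt_len if tgt_len is not None else src_len
    mask = tf.cast(mask, tf.float32)[:, None, None, :]  # [bsz, 1, 1, src_len]
    mask = tf.tile(mask, (1, 1, tgt_len, 1))            # [bsz, 1, tgt_len, src_len]
    return (1.0 - mask) * LARGE_NEGATIVE                # 0 where kept, -1e9 where padded

print(expand_mask_sketch(tf.constant([[1, 1, 0]]))[0, 0, 0])  # [0, 0, -1e9]: the padded position is suppressed
```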
@keras_serializable
class TFBlenderbotDecoder(keras.layers.Layer):
    config_class = BlenderbotConfig
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFBlenderbotDecoderLayer`]

    Args:
        config: BlenderbotConfig
        embed_tokens: output embedding
    """

    def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.padding_idx = config.pad_token_id
        self.embed_tokens = embed_tokens
        self.layerdrop = config.decoder_layerdrop
        self.embed_positions = TFBlenderbotLearnedPositionalEmbedding(
            config.max_position_embeddings,
            config.d_model,
            name="embed_positions",
        )
        self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
        self.layers = [TFBlenderbotDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
        self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")

        self.dropout = keras.layers.Dropout(config.dropout)

    def get_embed_tokens(self):
        return self.embed_tokens

    def set_embed_tokens(self, embed_tokens):
        self.embed_tokens = embed_tokens

    @unpack_inputs
    def call(
        self,
        input_ids=None,
        inputs_embeds=None,
        attention_mask=None,
        position_ids=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        head_mask=None,
        cross_attn_head_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
    ):
        r"""
        Args:
            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Indices of positions of each decoder input sequence token in the position embeddings. Selected in
                the range `[0, config.max_position_embeddings - 1]`.
            encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the
                cross-attention of the decoder.
            encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
                Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
                decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead
                of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated
                vectors than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail. This argument can be used only in eager mode; in graph mode the
                value in the config will be used instead.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail. This argument can be used only in eager mode; in graph mode the value in the config
                will be used instead.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
                in eager mode; in graph mode the value will always be set to `True`.
            training (`bool`, *optional*, defaults to `False`):
                Whether or not to use the model in training mode (some modules like dropout modules have different
                behaviors between training and evaluation).
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0

        # embed positions
        if position_ids is None:
            positions = self.embed_positions(input_shape, past_key_values_length)
        else:
            positions = self.embed_positions(input_shape, position_ids=position_ids)

        if inputs_embeds is None:
            check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale

        hidden_states = inputs_embeds

        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
        else:
            combined_attention_mask = _expand_mask(
                tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
            )

        if attention_mask is not None:
            combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])

        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])

        hidden_states = hidden_states + positions
        hidden_states = self.dropout(hidden_states, training=training)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None
        present_key_values = () if use_cache else None

        # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
        for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]:
            if attn_mask is not None:
                tf.debugging.assert_equal(
                    shape_list(attn_mask)[0],
                    len(self.layers),
                    message=(
                        f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
                        f" {shape_list(attn_mask)[0]}."
                    ),
                )
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            dropout_probability = random.uniform(0, 1)

            if training and (dropout_probability < self.layerdrop):
                continue

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
                hidden_states,
                attention_mask=combined_attention_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                layer_head_mask=head_mask[idx] if head_mask is not None else None,
                cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
                past_key_value=past_key_value,
            )

            if use_cache:
                present_key_values += (present_key_value,)

            if output_attentions:
                all_self_attns += (layer_self_attn,)

                if encoder_hidden_states is not None:
                    all_cross_attns += (layer_cross_attn,)

        hidden_states = self.layer_norm(hidden_states)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns
        else:
            return TFBaseModelOutputWithPastAndCrossAttentions(
                last_hidden_state=hidden_states,
                past_key_values=present_key_values,
                hidden_states=all_hidden_states,
                attentions=all_self_attns,
                cross_attentions=all_cross_attns,
            )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "embed_positions", None) is not None:
            with tf.name_scope(self.embed_positions.name):
                self.embed_positions.build(None)
        if getattr(self, "layer_norm", None) is not None:
            with tf.name_scope(self.layer_norm.name):
                self.layer_norm.build([None, None, self.config.d_model])
        if getattr(self, "layers", None) is not None:
            for layer in self.layers:
                with tf.name_scope(layer.name):
                    layer.build(None)
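The decoder combines two additive masks before calling its layers: a causal mask (each position may attend only to cached positions and to itself or earlier new tokens) plus the expanded padding mask. A small illustrative sketch of the causal part (a re-implementation for clarity, not the file's `_make_causal_mask`):

```python
import tensorflow as tf

LARGE_NEGATIVE = -1e9

def causal_mask_sketch(tgt_len, past_key_values_length=0):
    # Position i may attend to any cached position and to j <= i among the new tokens.
    i = tf.range(tgt_len)[:, None]
    j = tf.range(tgt_len + past_key_values_length)[None, :]
    allowed = j <= (i + past_key_values_length)
    return tf.where(allowed, 0.0, LARGE_NEGATIVE)  # [tgt_len, tgt_len + past_len]

print(causal_mask_sketch(3).numpy())
# row 0 masks positions 1 and 2; row 1 masks position 2; row 2 sees everything
```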
@keras_serializable
class TFBlenderbotMainLayer(keras.layers.Layer):
    config_class = BlenderbotConfig

    def __init__(self, config: BlenderbotConfig, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.shared = keras.layers.Embedding(
            input_dim=config.vocab_size,
            output_dim=config.d_model,
            embeddings_initializer=keras.initializers.TruncatedNormal(stddev=self.config.init_std),
            name="model.shared",
        )
        # Additional attribute to specify the expected name scope of the layer (for loading/storing weights)
        self.shared.load_weight_prefix = "model.shared"

        self.encoder = TFBlenderbotEncoder(config, self.shared, name="encoder")
        self.decoder = TFBlenderbotDecoder(config, self.shared, name="decoder")

    def get_input_embeddings(self):
        return self.shared

    def set_input_embeddings(self, new_embeddings):
        self.shared = new_embeddings
        self.encoder.embed_tokens = self.shared
        self.decoder.embed_tokens = self.shared

    @unpack_inputs
    def call(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        decoder_position_ids=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
        past_key_values=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
        **kwargs,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                training=training,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput):
            encoder_outputs = TFBaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )
        # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
        elif not return_dict and not isinstance(encoder_outputs, tuple):
            encoder_outputs = encoder_outputs.to_tuple()

        decoder_outputs = self.decoder(
            decoder_input_ids,
            attention_mask=decoder_attention_mask,
            position_ids=decoder_position_ids,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return TFSeq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # The shared/tied weights expect to be in the model base namespace
        # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the root namespace rather than
        # the current one.
        with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"):
            self.shared.build(None)
        if getattr(self, "encoder", None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
        if getattr(self, "decoder", None) is not None:
            with tf.name_scope(self.decoder.name):
                self.decoder.build(None)
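The main layer hands the same `keras.layers.Embedding` instance to the encoder and the decoder, so one weight matrix is shared across both (and reused as the LM output projection in the generation head below). A plain-Keras sketch of the tying pattern (the vocabulary and hidden sizes here are illustrative):

```python
import tensorflow as tf
from tensorflow import keras

shared = keras.layers.Embedding(input_dim=8008, output_dim=1280)  # vocab_size x d_model

token_ids = tf.constant([[5, 17, 42]])
enc_embeds = shared(token_ids)  # encoder lookup
dec_embeds = shared(token_ids)  # decoder reuses the identical table

# Tied LM head: project hidden states back onto the vocabulary with the same matrix.
logits = tf.matmul(dec_embeds, shared.embeddings, transpose_b=True)
print(logits.shape)  # (1, 3, 8008)
```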
@add_start_docstrings(
    "The bare BLENDERBOT Model outputting raw hidden-states without any specific head on top.",
    BLENDERBOT_START_DOCSTRING,
)
class TFBlenderbotModel(TFBlenderbotPreTrainedModel):
    def __init__(self, config: BlenderbotConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.model = TFBlenderbotMainLayer(config, name="model")

    def get_encoder(self):
        return self.model.encoder

    def get_decoder(self):
        return self.model.decoder

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
        if pretrained_model_name_or_path == "facebook/blenderbot-90M":
            from ..blenderbot_small import TFBlenderbotSmallModel

            warnings.warn(
                "The checkpoint `facebook/blenderbot-90M` is deprecated. In the future, please use the identical"
                " checkpoint `facebook/blenderbot_small-90M` with"
                " `TFBlenderbotSmallForConditionalGeneration.from_pretrained('facebook/blenderbot_small-90M')`"
                " instead.",
                FutureWarning,
            )
            return TFBlenderbotSmallModel.from_pretrained(pretrained_model_name_or_path)

        return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)

    @unpack_inputs
    @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSeq2SeqModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: tf.Tensor | None = None,
        attention_mask: tf.Tensor | None = None,
        decoder_input_ids: tf.Tensor | None = None,
        decoder_attention_mask: tf.Tensor | None = None,
        decoder_position_ids: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
        decoder_head_mask: tf.Tensor | None = None,
        cross_attn_head_mask: tf.Tensor | None = None,
        encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
        past_key_values: List[tf.Tensor] | None = None,
        inputs_embeds: tf.Tensor | None = None,
        decoder_inputs_embeds: tf.Tensor | None = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
        **kwargs,
    ) -> Union[Tuple[tf.Tensor], TFSeq2SeqModelOutput]:
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            decoder_position_ids=decoder_position_ids,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            encoder_outputs=encoder_outputs,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return outputs

    # Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output
    def serving_output(self, output):
        pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
        dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
        dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
        cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
        enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
        enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None

        return TFSeq2SeqModelOutput(
            last_hidden_state=output.last_hidden_state,
            past_key_values=pkv,
            decoder_hidden_states=dec_hs,
            decoder_attentions=dec_attns,
            cross_attentions=cross_attns,
            encoder_last_hidden_state=output.encoder_last_hidden_state,
            encoder_hidden_states=enc_hs,
            encoder_attentions=enc_attns,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "model", None) is not None:
            with tf.name_scope(self.model.name):
                self.model.build(None)
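A short usage sketch for the bare model (it mirrors the library's standard seq2seq pattern and uses the same checkpoint as the generation example above):

```python
from transformers import AutoTokenizer, TFBlenderbotModel

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
model = TFBlenderbotModel.from_pretrained("facebook/blenderbot-400M-distill")

inputs = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="tf")
# The bare model needs explicit decoder inputs; feeding the encoder ids back is enough for a smoke test.
outputs = model(inputs.input_ids, decoder_input_ids=inputs.input_ids)
print(outputs.last_hidden_state.shape)  # (1, seq_len, d_model)
```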
# Copied from transformers.models.bart.modeling_tf_bart.BiasLayer
class BiasLayer(keras.layers.Layer):
    """
    Bias as a layer. It is used for serialization purposes: `keras.Model.save_weights` stores on a per-layer basis,
    so all weights have to be registered in a layer.
    """

    def __init__(self, shape, initializer, trainable, name, **kwargs):
        super().__init__(name=name, **kwargs)
        # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of
        # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see:
        # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214
        self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable)

    def call(self, x):
        return x + self.bias
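Because `save_weights` serializes on a per-layer basis, a bare `tf.Variable` attached to the model would be skipped; wrapping the bias in a `Layer` makes it a tracked, serializable weight. A quick sketch of how the head below uses it (the vocab size is illustrative; the real one comes from `config.vocab_size`):

```python
import tensorflow as tf

bias = BiasLayer(shape=[1, 8008], initializer="zeros", trainable=False, name="final_logits_bias")
lm_logits = bias(tf.zeros((1, 3, 8008)))  # broadcast-adds the bias to every position
print(lm_logits.shape)  # (1, 3, 8008)
```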
@add_start_docstrings(
    "The BLENDERBOT Model with a language modeling head. Can be used for summarization.",
    BLENDERBOT_START_DOCSTRING,
)
class TFBlenderbotForConditionalGeneration(TFBlenderbotPreTrainedModel, TFCausalLanguageModelingLoss):
    _keys_to_ignore_on_load_unexpected = [
        r"model.encoder.embed_tokens.weight",
        r"model.decoder.embed_tokens.weight",
    ]

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.model = TFBlenderbotMainLayer(config, name="model")
        self.use_cache = config.use_cache
        # final_logits_bias is registered as a buffer in pytorch, so it is not trainable here for the sake of
        # consistency.
        self.bias_layer = BiasLayer(
            name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
        )

    def get_decoder(self):
        return self.model.decoder

    def get_encoder(self):
        return self.model.encoder

    def get_output_embeddings(self):
        return self.get_input_embeddings()

    def set_output_embeddings(self, value):
        self.set_input_embeddings(value)

    def get_bias(self):
        return {"final_logits_bias": self.bias_layer.bias}

    def set_bias(self, value):
        # Replaces the existing layers containing bias for correct (de)serialization.
        vocab_size = value["final_logits_bias"].shape[-1]
        self.bias_layer = BiasLayer(
            name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False
        )
        self.bias_layer.bias.assign(value["final_logits_bias"])

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
        if pretrained_model_name_or_path == "facebook/blenderbot-90M":
            from ..blenderbot_small import TFBlenderbotSmallForConditionalGeneration

            warnings.warn(
                "The checkpoint `facebook/blenderbot-90M` is deprecated. In the future, please use the identical"
                " checkpoint `facebook/blenderbot_small-90M` with"
                " `TFBlenderbotSmallForConditionalGeneration.from_pretrained('facebook/blenderbot_small-90M')`"
                " instead.",
                FutureWarning,
            )
            return TFBlenderbotSmallForConditionalGeneration.from_pretrained(pretrained_model_name_or_path)

        return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)

    @unpack_inputs
    @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    @add_end_docstrings(BLENDERBOT_GENERATION_EXAMPLE)
    def call(
        self,
        input_ids: tf.Tensor | None = None,
        attention_mask: tf.Tensor | None = None,
        decoder_input_ids: tf.Tensor | None = None,
        decoder_attention_mask: tf.Tensor | None = None,
        decoder_position_ids: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
        decoder_head_mask: tf.Tensor | None = None,
        cross_attn_head_mask: tf.Tensor | None = None,
        encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
        past_key_values: List[tf.Tensor] | None = None,
        inputs_embeds: tf.Tensor | None = None,
        decoder_inputs_embeds: tf.Tensor | None = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        """
        if labels is not None:
            labels = tf.where(
                labels == self.config.pad_token_id,
                tf.cast(tf.fill(shape_list(labels), -100), labels.dtype),
                labels,
            )
            use_cache = False
            if decoder_input_ids is None and decoder_inputs_embeds is None:
                decoder_input_ids = shift_tokens_right(
                    labels, self.config.pad_token_id, self.config.decoder_start_token_id
                )

        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            decoder_position_ids=decoder_position_ids,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)
        lm_logits = self.bias_layer(lm_logits)
        masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
        return TFSeq2SeqLMOutput(
            loss=masked_lm_loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,  # index 1 of d outputs
            decoder_hidden_states=outputs.decoder_hidden_states,  # index 2 of d outputs
            decoder_attentions=outputs.decoder_attentions,  # index 3 of d outputs
            cross_attentions=outputs.cross_attentions,  # index 4 of d outputs
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,  # index 0 of encoder outputs
            encoder_hidden_states=outputs.encoder_hidden_states,  # 1 of e out
            encoder_attentions=outputs.encoder_attentions,  # 2 of e out
        )

    # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output
    def serving_output(self, output):
        pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
        dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
        dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
        cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
        enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
        enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None

        return TFSeq2SeqLMOutput(
            logits=output.logits,
            past_key_values=pkv,
            decoder_hidden_states=dec_hs,
            decoder_attentions=dec_attns,
            cross_attentions=cross_attns,
            encoder_last_hidden_state=output.encoder_last_hidden_state,
            encoder_hidden_states=enc_hs,
            encoder_attentions=enc_attns,
        )

    # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation
    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past_key_values=None,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs,
    ):
        # cut decoder_input_ids if past_key_values is used
        if past_key_values is not None:
            decoder_input_ids = decoder_input_ids[:, -1:]

        if decoder_attention_mask is not None:  # xla
            decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:]
        elif past_key_values is not None:  # no xla + past_key_values
            decoder_position_ids = past_key_values[0][0].shape[2]
        else:  # no xla + no past_key_values
            decoder_position_ids = tf.range(decoder_input_ids.shape[1])

        return {
            "input_ids": None,  # encoder_outputs is defined. input_ids not needed
            "encoder_outputs": encoder_outputs,
            "past_key_values": past_key_values,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "decoder_position_ids": decoder_position_ids,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,  # change this to avoid caching (presumably for debugging)
        }

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "model", None) is not None:
            with tf.name_scope(self.model.name):
                self.model.build(None)
        if getattr(self, "bias_layer", None) is not None:
            with tf.name_scope(self.bias_layer.name):
                self.bias_layer.build(None)
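When `labels` are provided and no decoder inputs are given, `call()` above shifts the labels right to build `decoder_input_ids` and replaces pad tokens in the labels with `-100` so they are excluded from the loss. A sketch of a single supervised step (same checkpoint as the generation example; fine-tuning specifics are up to the caller):

```python
from transformers import AutoTokenizer, TFBlenderbotForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
model = TFBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")

inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
targets = tokenizer(["That's unfortunate. Are they trying to be healthier?"], return_tensors="tf")

# Passing labels triggers the internal shift_tokens_right and loss computation.
outputs = model(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, labels=targets.input_ids)
print(float(outputs.loss))
```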
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/tokenization_blenderbot.py
ADDED
@@ -0,0 +1,427 @@
# coding=utf-8
# Copyright 2021 The Facebook Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization class for Blenderbot."""

import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to
    whitespace/control characters the bpe code barfs on.

    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your
    vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K
    for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want
    lookup tables between utf-8 bytes and unicode strings.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
63 |
+
|
64 |
+
# Copied from transformers.models.roberta.tokenization_roberta.get_pairs
|
65 |
+
def get_pairs(word):
|
66 |
+
"""
|
67 |
+
Return set of symbol pairs in a word.
|
68 |
+
|
69 |
+
Word is represented as tuple of symbols (symbols being variable-length strings).
|
70 |
+
"""
|
71 |
+
pairs = set()
|
72 |
+
prev_char = word[0]
|
73 |
+
for char in word[1:]:
|
74 |
+
pairs.add((prev_char, char))
|
75 |
+
prev_char = char
|
76 |
+
return pairs
|
77 |
+
|
78 |
+
|
79 |
+
class BlenderbotTokenizer(PreTrainedTokenizer):
|
80 |
+
"""
|
81 |
+
Constructs a Blenderbot tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.
|
82 |
+
|
83 |
+
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
|
84 |
+
be encoded differently whether it is at the beginning of the sentence (without space) or not:
|
85 |
+
|
86 |
+
```python
|
87 |
+
>>> from transformers import BlenderbotTokenizer
|
88 |
+
|
89 |
+
>>> tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
|
90 |
+
>>> tokenizer.add_prefix_space = False
|
91 |
+
>>> tokenizer("Hello world")["input_ids"]
|
92 |
+
[47, 921, 86, 1085, 2]
|
93 |
+
|
94 |
+
>>> tokenizer(" Hello world")["input_ids"]
|
95 |
+
[6950, 1085, 2]
|
96 |
+
```
|
97 |
+
|
98 |
+
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
|
99 |
+
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
|
100 |
+
|
101 |
+
<Tip>
|
102 |
+
|
103 |
+
When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
|
104 |
+
|
105 |
+
</Tip>
|
106 |
+
|
107 |
+
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
|
108 |
+
this superclass for more information regarding those methods.
|
109 |
+
|
110 |
+
Args:
|
111 |
+
vocab_file (`str`):
|
112 |
+
Path to the vocabulary file.
|
113 |
+
merges_file (`str`):
|
114 |
+
Path to the merges file.
|
115 |
+
errors (`str`, *optional*, defaults to `"replace"`):
|
116 |
+
Paradigm to follow when decoding bytes to UTF-8. See
|
117 |
+
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
|
118 |
+
bos_token (`str`, *optional*, defaults to `"<s>"`):
|
119 |
+
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
|
120 |
+
|
121 |
+
<Tip>
|
122 |
+
|
123 |
+
When building a sequence using special tokens, this is not the token that is used for the beginning of
|
124 |
+
sequence. The token used is the `cls_token`.
|
125 |
+
|
126 |
+
</Tip>
|
127 |
+
|
128 |
+
eos_token (`str`, *optional*, defaults to `"</s>"`):
|
129 |
+
The end of sequence token.
|
130 |
+
|
131 |
+
<Tip>
|
132 |
+
|
133 |
+
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
|
134 |
+
The token used is the `sep_token`.
|
135 |
+
|
136 |
+
</Tip>
|
137 |
+
|
138 |
+
sep_token (`str`, *optional*, defaults to `"</s>"`):
|
139 |
+
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
|
140 |
+
sequence classification or for a text and a question for question answering. It is also used as the last
|
141 |
+
token of a sequence built with special tokens.
|
142 |
+
cls_token (`str`, *optional*, defaults to `"<s>"`):
|
143 |
+
The classifier token which is used when doing sequence classification (classification of the whole sequence
|
144 |
+
instead of per-token classification). It is the first token of the sequence when built with special tokens.
|
145 |
+
unk_token (`str`, *optional*, defaults to `"<unk>"`):
|
146 |
+
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
|
147 |
+
token instead.
|
148 |
+
pad_token (`str`, *optional*, defaults to `"<pad>"`):
|
149 |
+
The token used for padding, for example when batching sequences of different lengths.
|
150 |
+
mask_token (`str`, *optional*, defaults to `"<mask>"`):
|
151 |
+
The token used for masking values. This is the token used when training this model with masked language
|
152 |
+
modeling. This is the token which the model will try to predict.
|
153 |
+
add_prefix_space (`bool`, *optional*, defaults to `False`):
|
154 |
+
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
|
155 |
+
other word. (Blenderbot tokenizer detect beginning of words by the preceding space).
|
156 |
+
"""
|
157 |
+
|
158 |
+
vocab_files_names = VOCAB_FILES_NAMES
|
159 |
+
model_input_names = ["input_ids", "attention_mask"]
|
160 |
+
|
161 |
+
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.__init__ with Roberta->Blenderbot, RoBERTa->Blenderbot
|
162 |
+
def __init__(
|
163 |
+
self,
|
164 |
+
vocab_file,
|
165 |
+
merges_file,
|
166 |
+
errors="replace",
|
167 |
+
bos_token="<s>",
|
168 |
+
eos_token="</s>",
|
169 |
+
sep_token="</s>",
|
170 |
+
cls_token="<s>",
|
171 |
+
unk_token="<unk>",
|
172 |
+
pad_token="<pad>",
|
173 |
+
mask_token="<mask>",
|
174 |
+
add_prefix_space=False,
|
175 |
+
**kwargs,
|
176 |
+
):
|
177 |
+
bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
|
178 |
+
pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
|
179 |
+
eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
|
180 |
+
unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
|
181 |
+
sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
|
182 |
+
cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
|
183 |
+
|
184 |
+
# Mask token behave like a normal word, i.e. include the space before it
|
185 |
+
mask_token = (
|
186 |
+
AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
|
187 |
+
if isinstance(mask_token, str)
|
188 |
+
else mask_token
|
189 |
+
)
|
190 |
+
|
191 |
+
# these special tokens are not part of the vocab.json, let's add them in the correct order
|
192 |
+
|
193 |
+
with open(vocab_file, encoding="utf-8") as vocab_handle:
|
194 |
+
self.encoder = json.load(vocab_handle)
|
195 |
+
self.decoder = {v: k for k, v in self.encoder.items()}
|
196 |
+
self.errors = errors # how to handle errors in decoding
|
197 |
+
self.byte_encoder = bytes_to_unicode()
|
198 |
+
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
|
199 |
+
with open(merges_file, encoding="utf-8") as merges_handle:
|
200 |
+
bpe_merges = merges_handle.read().split("\n")[1:-1]
|
201 |
+
bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
|
202 |
+
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
|
203 |
+
self.cache = {}
|
204 |
+
self.add_prefix_space = add_prefix_space
|
205 |
+
|
206 |
+
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
|
207 |
+
self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
|
208 |
+
|
209 |
+
super().__init__(
|
210 |
+
errors=errors,
|
211 |
+
bos_token=bos_token,
|
212 |
+
eos_token=eos_token,
|
213 |
+
unk_token=unk_token,
|
214 |
+
sep_token=sep_token,
|
215 |
+
cls_token=cls_token,
|
216 |
+
pad_token=pad_token,
|
217 |
+
mask_token=mask_token,
|
218 |
+
add_prefix_space=add_prefix_space,
|
219 |
+
**kwargs,
|
220 |
+
)
|
221 |
+
|
222 |
+
@property
|
223 |
+
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
|
224 |
+
def vocab_size(self):
|
225 |
+
return len(self.encoder)
|
226 |
+
|
227 |
+
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.get_vocab with Roberta->Blenderbot, RoBERTa->Blenderbot
|
228 |
+
def get_vocab(self):
|
229 |
+
vocab = dict(self.encoder).copy()
|
230 |
+
vocab.update(self.added_tokens_encoder)
|
231 |
+
return vocab
|
232 |
+
|
233 |
+
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.bpe with Roberta->Blenderbot, RoBERTa->Blenderbot
|
234 |
+
def bpe(self, token):
|
235 |
+
if token in self.cache:
|
236 |
+
return self.cache[token]
|
237 |
+
word = tuple(token)
|
238 |
+
pairs = get_pairs(word)
|
239 |
+
|
240 |
+
if not pairs:
|
241 |
+
return token
|
242 |
+
|
243 |
+
while True:
|
244 |
+
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
|
245 |
+
if bigram not in self.bpe_ranks:
|
246 |
+
break
|
247 |
+
first, second = bigram
|
248 |
+
new_word = []
|
249 |
+
i = 0
|
250 |
+
while i < len(word):
|
251 |
+
try:
|
252 |
+
j = word.index(first, i)
|
253 |
+
except ValueError:
|
254 |
+
new_word.extend(word[i:])
|
255 |
+
break
|
256 |
+
else:
|
257 |
+
new_word.extend(word[i:j])
|
258 |
+
i = j
|
259 |
+
|
260 |
+
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
|
261 |
+
new_word.append(first + second)
|
262 |
+
i += 2
|
263 |
+
else:
|
264 |
+
new_word.append(word[i])
|
265 |
+
i += 1
|
266 |
+
new_word = tuple(new_word)
|
267 |
+
word = new_word
|
268 |
+
if len(word) == 1:
|
269 |
+
break
|
270 |
+
else:
|
271 |
+
pairs = get_pairs(word)
|
272 |
+
word = " ".join(word)
|
273 |
+
self.cache[token] = word
|
274 |
+
return word
|
275 |
+
|
276 |
+
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._tokenize with Roberta->Blenderbot, RoBERTa->Blenderbot
|
277 |
+
def _tokenize(self, text):
|
278 |
+
"""Tokenize a string."""
|
279 |
+
bpe_tokens = []
|
280 |
+
for token in re.findall(self.pat, text):
|
281 |
+
token = "".join(
|
282 |
+
self.byte_encoder[b] for b in token.encode("utf-8")
|
283 |
+
) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
|
284 |
+
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
|
285 |
+
return bpe_tokens
|
286 |
+
|
287 |
+
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._convert_token_to_id with Roberta->Blenderbot, RoBERTa->Blenderbot
|
288 |
+
def _convert_token_to_id(self, token):
|
289 |
+
"""Converts a token (str) in an id using the vocab."""
|
290 |
+
return self.encoder.get(token, self.encoder.get(self.unk_token))
|
291 |
+
|
292 |
+
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._convert_id_to_token with Roberta->Blenderbot, RoBERTa->Blenderbot
|
293 |
+
def _convert_id_to_token(self, index):
|
294 |
+
"""Converts an index (integer) in a token (str) using the vocab."""
|
295 |
+
return self.decoder.get(index)
|
296 |
+
|
297 |
+
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.convert_tokens_to_string with Roberta->Blenderbot, RoBERTa->Blenderbot
|
298 |
+
def convert_tokens_to_string(self, tokens):
|
299 |
+
"""Converts a sequence of tokens (string) in a single string."""
|
300 |
+
text = "".join(tokens)
|
301 |
+
text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
|
302 |
+
return text
|
303 |
+
|
304 |
+
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.save_vocabulary with Roberta->Blenderbot, RoBERTa->Blenderbot
|
305 |
+
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
|
306 |
+
if not os.path.isdir(save_directory):
|
307 |
+
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
|
308 |
+
return
|
309 |
+
vocab_file = os.path.join(
|
310 |
+
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
|
311 |
+
)
|
312 |
+
merge_file = os.path.join(
|
313 |
+
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
|
314 |
+
)
|
315 |
+
|
316 |
+
with open(vocab_file, "w", encoding="utf-8") as f:
|
317 |
+
f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
|
318 |
+
|
319 |
+
index = 0
|
320 |
+
with open(merge_file, "w", encoding="utf-8") as writer:
|
321 |
+
writer.write("#version: 0.2\n")
|
322 |
+
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
|
323 |
+
if index != token_index:
|
324 |
+
logger.warning(
|
325 |
+
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
|
326 |
+
" Please check that the tokenizer is not corrupted!"
|
327 |
+
)
|
328 |
+
index = token_index
|
329 |
+
writer.write(" ".join(bpe_tokens) + "\n")
|
330 |
+
index += 1
|
331 |
+
|
332 |
+
return vocab_file, merge_file
|
333 |
+
|
334 |
+
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.get_special_tokens_mask with Roberta->Blenderbot, RoBERTa->Blenderbot
|
335 |
+
def get_special_tokens_mask(
|
336 |
+
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
|
337 |
+
) -> List[int]:
|
338 |
+
"""
|
339 |
+
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
|
340 |
+
special tokens using the tokenizer `prepare_for_model` method.
|
341 |
+
|
342 |
+
Args:
|
343 |
+
token_ids_0 (`List[int]`):
|
344 |
+
List of IDs.
|
345 |
+
token_ids_1 (`List[int]`, *optional*):
|
346 |
+
Optional second list of IDs for sequence pairs.
|
347 |
+
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
|
348 |
+
Whether or not the token list is already formatted with special tokens for the model.
|
349 |
+
|
350 |
+
Returns:
|
351 |
+
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
|
352 |
+
"""
|
353 |
+
if already_has_special_tokens:
|
354 |
+
return super().get_special_tokens_mask(
|
355 |
+
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
|
356 |
+
)
|
357 |
+
|
358 |
+
if token_ids_1 is None:
|
359 |
+
return [1] + ([0] * len(token_ids_0)) + [1]
|
360 |
+
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
|
361 |
+
|
362 |
+
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.create_token_type_ids_from_sequences with Roberta->Blenderbot, RoBERTa->Blenderbot
|
363 |
+
def create_token_type_ids_from_sequences(
|
364 |
+
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
|
365 |
+
) -> List[int]:
|
366 |
+
"""
|
367 |
+
Create a mask from the two sequences passed to be used in a sequence-pair classification task. Blenderbot does not
|
368 |
+
make use of token type ids, therefore a list of zeros is returned.
|
369 |
+
|
370 |
+
Args:
|
371 |
+
token_ids_0 (`List[int]`):
|
372 |
+
List of IDs.
|
373 |
+
token_ids_1 (`List[int]`, *optional*):
|
374 |
+
Optional second list of IDs for sequence pairs.
|
375 |
+
|
376 |
+
Returns:
|
377 |
+
`List[int]`: List of zeros.
|
378 |
+
"""
|
379 |
+
sep = [self.sep_token_id]
|
380 |
+
cls = [self.cls_token_id]
|
381 |
+
|
382 |
+
if token_ids_1 is None:
|
383 |
+
return len(cls + token_ids_0 + sep) * [0]
|
384 |
+
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
|
385 |
+
|
386 |
+
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.prepare_for_tokenization with Roberta->Blenderbot, RoBERTa->Blenderbot
|
387 |
+
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
|
388 |
+
add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
|
389 |
+
if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
|
390 |
+
text = " " + text
|
391 |
+
return (text, kwargs)
|
392 |
+
|
393 |
+
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
|
394 |
+
"""
|
395 |
+
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
|
396 |
+
adding special tokens. A Blenderbot sequence has the following format:
|
397 |
+
- single sequence: ` X </s>`
|
398 |
+
|
399 |
+
Args:
|
400 |
+
token_ids_0 (`List[int]`):
|
401 |
+
List of IDs to which the special tokens will be added
|
402 |
+
token_ids_1 (`List[int]`, *optional*):
|
403 |
+
Will be ignored
|
404 |
+
Returns:
|
405 |
+
`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
|
406 |
+
"""
|
407 |
+
return token_ids_0 + [self.eos_token_id]
|
408 |
+
|
409 |
+
@property
|
410 |
+
def default_chat_template(self):
|
411 |
+
"""
|
412 |
+
A very simple chat template that just adds whitespace between messages.
|
413 |
+
"""
|
414 |
+
logger.warning_once(
|
415 |
+
"\nNo chat template is defined for this tokenizer - using the default template "
|
416 |
+
f"for the {self.__class__.__name__} class. If the default is not appropriate for "
|
417 |
+
"your model, please set `tokenizer.chat_template` to an appropriate template. "
|
418 |
+
"See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
|
419 |
+
)
|
420 |
+
return (
|
421 |
+
"{% for message in messages %}"
|
422 |
+
"{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}"
|
423 |
+
"{{ message['content'] }}"
|
424 |
+
"{% if not loop.last %}{{ ' ' }}{% endif %}"
|
425 |
+
"{% endfor %}"
|
426 |
+
"{{ eos_token }}"
|
427 |
+
)
|
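
As a reading aid, here is a minimal usage sketch of the slow tokenizer defined above. It is not part of the packaged file, and the checkpoint name is an assumption; any Blenderbot checkpoint that ships `vocab.json` and `merges.txt` would behave the same way on this vendored version of the library.

```python
# Hypothetical usage sketch, not part of the diff. Illustrates
# build_inputs_with_special_tokens (only eos is appended, no bos/cls)
# and the default chat template defined above.
from transformers import BlenderbotTokenizer

tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")  # assumed checkpoint

# Single sequences are built as ` X </s>`.
ids = tokenizer("Hello world")["input_ids"]
assert ids[-1] == tokenizer.eos_token_id

# The default chat template prefixes user turns with a space, joins turns
# with spaces, and ends with eos_token -- here: " Hi! Hello.</s>"
messages = [{"role": "user", "content": "Hi!"}, {"role": "assistant", "content": "Hello."}]
print(tokenizer.apply_chat_template(messages, tokenize=False))
```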
llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot/tokenization_blenderbot_fast.py
ADDED
@@ -0,0 +1,309 @@
# coding=utf-8
# Copyright 2021 The Facebook Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Tokenization class for Blenderbot."""
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer


logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" Blenderbot tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2
    tokenizer, using byte-level Byte-Pair-Encoding.

    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
    be encoded differently whether it is at the beginning of the sentence (without space) or not:

    ```python
    >>> from transformers import BlenderbotTokenizerFast

    >>> tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
    >>> tokenizer("Hello world")["input_ids"]
    [6950, 1085, 2]

    >>> tokenizer(" Hello world")["input_ids"]
    [6950, 1085, 2]
    ```

    You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
    call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.

    <Tip>

    When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.

    </Tip>

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows to treat the leading word just as any
            other word. (Blenderbot tokenizer detect beginning of words by the preceding space).
        trim_offsets (`bool`, *optional*, defaults to `True`):
            Whether the post processing step should trim offsets to avoid including whitespaces.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.__init__ with Roberta->Blenderbot, RoBERTa->Blenderbot
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        """
        `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
        having been set.

        Blenderbot tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily
        comprise the space before the *<mask>*.
        """
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        """
        Overriding the default behavior of the mask token to have it eat the space before it.

        This is needed to preserve backward compatibility with all the previously used models based on Roberta.
        """
        # Mask token behave like a normal word, i.e. include the space before it
        # So we set lstrip to True
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast._batch_encode_plus with Roberta->Blenderbot, RoBERTa->Blenderbot
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast._encode_plus with Roberta->Blenderbot, RoBERTa->Blenderbot
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.save_vocabulary with Roberta->Blenderbot, RoBERTa->Blenderbot
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.create_token_type_ids_from_sequences with Roberta->Blenderbot, RoBERTa->Blenderbot
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. Blenderbot does not
        make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A Blenderbot sequence has the following format:
        - single sequence: ` X </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added
            token_ids_1 (`List[int]`, *optional*):
                Will be ignored
        Returns:
            `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        return token_ids_0 + [self.eos_token_id]

    @property
    # Copied from transformers.models.blenderbot.tokenization_blenderbot.BlenderbotTokenizer.default_chat_template
    def default_chat_template(self):
        """
        A very simple chat template that just adds whitespace between messages.
        """
        logger.warning_once(
            "\nNo chat template is defined for this tokenizer - using the default template "
            f"for the {self.__class__.__name__} class. If the default is not appropriate for "
            "your model, please set `tokenizer.chat_template` to an appropriate template. "
            "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
        )
        return (
            "{% for message in messages %}"
            "{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}"
            "{{ message['content'] }}"
            "{% if not loop.last %}{{ ' ' }}{% endif %}"
            "{% endfor %}"
            "{{ eos_token }}"
        )
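
A small illustrative sketch (again, not part of the diff) of the pretokenized-input guard enforced by `_batch_encode_plus`/`_encode_plus` above; the checkpoint name is the same assumption as before.

```python
# Sketch: the fast tokenizer asserts add_prefix_space=True before accepting
# pretokenized input, because byte-level BPE marks word starts with a space.
from transformers import BlenderbotTokenizerFast

tok = BlenderbotTokenizerFast.from_pretrained(
    "facebook/blenderbot-400M-distill", add_prefix_space=True  # assumed checkpoint
)
enc = tok(["Hello", "world"], is_split_into_words=True)
print(enc["input_ids"])  # each word is encoded as if preceded by a space

# With the default add_prefix_space=False, the same call raises an AssertionError.
```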
llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__init__.py
ADDED
@@ -0,0 +1,83 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_clvp": [
        "CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ClvpConfig",
        "ClvpDecoderConfig",
        "ClvpEncoderConfig",
    ],
    "feature_extraction_clvp": ["ClvpFeatureExtractor"],
    "processing_clvp": ["ClvpProcessor"],
    "tokenization_clvp": ["ClvpTokenizer"],
}


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clvp"] = [
        "CLVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClvpModelForConditionalGeneration",
        "ClvpForCausalLM",
        "ClvpModel",
        "ClvpPreTrainedModel",
        "ClvpEncoder",
        "ClvpDecoder",
    ]


if TYPE_CHECKING:
    from .configuration_clvp import (
        CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ClvpConfig,
        ClvpDecoderConfig,
        ClvpEncoderConfig,
    )
    from .feature_extraction_clvp import ClvpFeatureExtractor
    from .processing_clvp import ClvpProcessor
    from .tokenization_clvp import ClvpTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clvp import (
            CLVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClvpDecoder,
            ClvpEncoder,
            ClvpForCausalLM,
            ClvpModel,
            ClvpModelForConditionalGeneration,
            ClvpPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
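
As a reading aid for the `_LazyModule` pattern above: at import time the module body only registers names in `_import_structure`, and the `sys.modules` swap on the last line means each submodule is imported on first attribute access. A sketch of the consumer-side behavior (not code from the diff):

```python
# Sketch: attribute access drives the actual imports.
import transformers.models.clvp as clvp  # binds to the _LazyModule instance

cfg = clvp.ClvpConfig()  # first access triggers import of configuration_clvp only
# Torch-backed symbols are resolved (and torch is required) only when touched:
model_cls = clvp.ClvpModelForConditionalGeneration
```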
llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.29 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/configuration_clvp.cpython-310.pyc
ADDED
Binary file (17.9 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/convert_clvp_to_hf.cpython-310.pyc
ADDED
Binary file (6.2 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/feature_extraction_clvp.cpython-310.pyc
ADDED
Binary file (9.23 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/modeling_clvp.cpython-310.pyc
ADDED
Binary file (63.7 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/number_normalizer.cpython-310.pyc
ADDED
Binary file (6.83 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/processing_clvp.cpython-310.pyc
ADDED
Binary file (2.9 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/configuration_clvp.py
ADDED
@@ -0,0 +1,456 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
""" CLVP model configuration"""
|
16 |
+
|
17 |
+
|
18 |
+
import os
|
19 |
+
from typing import TYPE_CHECKING, Union
|
20 |
+
|
21 |
+
|
22 |
+
if TYPE_CHECKING:
|
23 |
+
pass
|
24 |
+
|
25 |
+
from ...configuration_utils import PretrainedConfig
|
26 |
+
from ...utils import logging
|
27 |
+
|
28 |
+
|
29 |
+
logger = logging.get_logger(__name__)
|
30 |
+
|
31 |
+
|
32 |
+
from ..deprecated._archive_maps import CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
|
33 |
+
|
34 |
+
|
35 |
+
class ClvpEncoderConfig(PretrainedConfig):
|
36 |
+
r"""
|
37 |
+
This is the configuration class to store the configuration of a [`ClvpEncoder`]. It is used to instantiate a CLVP
|
38 |
+
text or CLVP speech encoder according to the specified arguments. Instantiating a configuration with the defaults
|
39 |
+
will yield a similar configuration to that of the encoder of the CLVP
|
40 |
+
[susnato/clvp_dev](https://huggingface.co/susnato/clvp_dev) architecture.
|
41 |
+
|
42 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
43 |
+
documentation from [`PretrainedConfig`] for more information.
|
44 |
+
|
45 |
+
Args:
|
46 |
+
vocab_size (`int`, *optional*, defaults to 256):
|
47 |
+
Vocabulary size of the CLVP Encoder model.
|
48 |
+
hidden_size (`int`, *optional*, defaults to 768):
|
49 |
+
Dimensionality of the encoder layers and the pooler layer.
|
50 |
+
intermediate_size (`int`, *optional*, defaults to 1536):
|
51 |
+
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
|
52 |
+
projection_dim (`int`, *optional*, defaults to 768):
|
53 |
+
Dimensionality of the projection vector.
|
54 |
+
num_hidden_layers (`int`, *optional*, defaults to 20):
|
55 |
+
Number of hidden layers in the Transformer encoder.
|
56 |
+
num_attention_heads (`int`, *optional*, defaults to 12):
|
57 |
+
Number of attention heads for each attention layer in the Transformer encoder.
|
58 |
+
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
|
59 |
+
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
|
60 |
+
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
|
61 |
+
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
|
62 |
+
The epsilon used by the layer normalization layers.
|
63 |
+
attention_dropout (`float`, *optional*, defaults to 0.1):
|
64 |
+
The dropout ratio for the attention probabilities.
|
65 |
+
dropout (`float`, *optional*, defaults to 0.1):
|
66 |
+
The dropout ratio for the feed-forward layers in [`ClvpEncoderMLP`].
|
67 |
+
use_rotary_embedding (`bool`, *optional*, defaults to `True`):
|
68 |
+
Whether to use rotary_embedding or not.
|
69 |
+
use_attention_bias (`bool`, *optional*, defaults to `False`):
|
70 |
+
Whether to use bias in Query, Key and Value layers during self attention.
|
71 |
+
summary_type (`str`, *optional*, defaults to `"mean"`):
|
72 |
+
What strategy to use to get pooler_output from the last_hidden_state. `"last"`, `"first"`, `"mean"` and
|
73 |
+
`"cls_index"` are supported.
|
74 |
+
initializer_factor (`float`, *optional*, defaults to 1.0):
|
75 |
+
A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
|
76 |
+
testing).
|
77 |
+
bos_token_id (`int`, *optional*, defaults to 255):
|
78 |
+
Beginning of sequence token id.
|
79 |
+
eos_token_id (`int`, *optional*, defaults to 0):
|
80 |
+
End of sequence token id.
|
81 |
+
|
82 |
+
Example:
|
83 |
+
|
84 |
+
```python
|
85 |
+
>>> from transformers import ClvpEncoderConfig, ClvpEncoder
|
86 |
+
|
87 |
+
>>> # Initializing a ClvpEncoderConfig with susnato/clvp_dev style configuration
|
88 |
+
>>> encoder_configuration = ClvpEncoderConfig()
|
89 |
+
|
90 |
+
>>> # Initializing a ClvpEncoder (with random weights) from the susnato/clvp_dev style configuration
|
91 |
+
>>> model = ClvpEncoder(encoder_configuration)
|
92 |
+
|
93 |
+
>>> # Accessing the model configuration
|
94 |
+
>>> configuration = model.config
|
95 |
+
```"""
|
96 |
+
|
97 |
+
model_type = "clvp_encoder"
|
98 |
+
|
99 |
+
def __init__(
|
100 |
+
self,
|
101 |
+
vocab_size=256,
|
102 |
+
hidden_size=768,
|
103 |
+
intermediate_size=1536,
|
104 |
+
projection_dim=768,
|
105 |
+
num_hidden_layers=20,
|
106 |
+
num_attention_heads=12,
|
107 |
+
hidden_act="gelu",
|
108 |
+
layer_norm_eps=1e-5,
|
109 |
+
attention_dropout=0.1,
|
110 |
+
dropout=0.1,
|
111 |
+
use_rotary_embedding=True,
|
112 |
+
use_attention_bias=False,
|
113 |
+
summary_type="mean",
|
114 |
+
initializer_factor=1.0,
|
115 |
+
bos_token_id=255,
|
116 |
+
eos_token_id=0,
|
117 |
+
**kwargs,
|
118 |
+
):
|
119 |
+
self.vocab_size = vocab_size
|
120 |
+
self.hidden_size = hidden_size
|
121 |
+
self.intermediate_size = intermediate_size
|
122 |
+
self.projection_dim = projection_dim
|
123 |
+
self.num_hidden_layers = num_hidden_layers
|
124 |
+
self.num_attention_heads = num_attention_heads
|
125 |
+
self.layer_norm_eps = layer_norm_eps
|
126 |
+
self.hidden_act = hidden_act
|
127 |
+
self.initializer_factor = initializer_factor
|
128 |
+
self.attention_dropout = attention_dropout
|
129 |
+
self.dropout = dropout
|
130 |
+
self.use_rotary_embedding = use_rotary_embedding
|
131 |
+
self.use_attention_bias = use_attention_bias
|
132 |
+
self.summary_type = summary_type
|
133 |
+
self.bos_token_id = bos_token_id
|
134 |
+
self.eos_token_id = eos_token_id
|
135 |
+
|
136 |
+
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
|
137 |
+
|
138 |
+
@classmethod
|
139 |
+
def from_pretrained(
|
140 |
+
cls, pretrained_model_name_or_path: Union[str, os.PathLike], config_type: str = "text_config", **kwargs
|
141 |
+
) -> "PretrainedConfig":
|
142 |
+
cls._set_token_in_kwargs(kwargs)
|
143 |
+
|
144 |
+
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
|
145 |
+
|
146 |
+
# make sure to have the config_type be either "text_config" or "speech_config"
|
147 |
+
# this is to make sure that we can load only text or speech configs from the nested ClvpConfig.
|
148 |
+
if config_type not in ["text_config", "speech_config"]:
|
149 |
+
raise ValueError(
|
150 |
+
f"We can only load either 'text_config' or 'speech_config' but you are trying to load" f"{config_type}"
|
151 |
+
)
|
152 |
+
|
153 |
+
# get the text config dict if we are loading from ClvpConfig
|
154 |
+
if config_dict.get("model_type") == "clvp":
|
155 |
+
config_dict = config_dict[config_type]
|
156 |
+
|
157 |
+
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
|
158 |
+
logger.warning(
|
159 |
+
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
|
160 |
+
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
|
161 |
+
)
|
162 |
+
|
163 |
+
return cls.from_dict(config_dict, **kwargs)
|
164 |
+
|
165 |
+
|
166 |
+
class ClvpDecoderConfig(PretrainedConfig):
|
167 |
+
r"""
|
168 |
+
This is the configuration class to store the configuration of a [`ClvpDecoder`]. It is used to instantiate a CLVP
|
169 |
+
Decoder Model according to the specified arguments, defining the model architecture. Instantiating a configuration
|
170 |
+
with the defaults will yield a similar configuration to that of the Decoder part of the CLVP
|
171 |
+
[susnato/clvp_dev](https://huggingface.co/susnato/clvp_dev) architecture.
|
172 |
+
|
173 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
174 |
+
documentation from [`PretrainedConfig`] for more information.
|
175 |
+
|
176 |
+
The architecture is similar to GPT2.
|
177 |
+
|
178 |
+
Args:
|
179 |
+
vocab_size (`int`, *optional*, defaults to 8194):
|
180 |
+
Vocabulary size of the model.
|
181 |
+
max_position_embeddings (`int`, *optional*, defaults to 608):
|
182 |
+
The maximum sequence length of mel tokens that this model might ever be used with. Similar to `n_positions`
|
183 |
+
in `GPT2Config`.
|
184 |
+
max_text_tokens (`int`, *optional*, defaults to 404):
|
185 |
+
The maximum sequence length of text tokens that this model might ever be used with. Similar to
|
186 |
+
`n_positions` in `GPT2Config`.
|
187 |
+
hidden_size (`int`, *optional*, defaults to 1024):
|
188 |
+
Dimensionality of the embeddings and hidden states.
|
189 |
+
num_hidden_layers (`int`, *optional*, defaults to 30):
|
190 |
+
Number of hidden layers in the Transformer encoder.
|
191 |
+
num_attention_heads (`int`, *optional*, defaults to 16):
|
192 |
+
Number of attention heads for each attention layer in the Transformer encoder.
|
193 |
+
n_inner (`int`, *optional*):
|
194 |
+
Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `hidden_size`.
|
195 |
+
num_mel_attn_blocks (`int`, *optional*, defaults to 6):
|
196 |
+
Denotes the number of self attention layers in [`ClvpConditioningEncoder`].
|
197 |
+
activation_function (`str`, *optional*, defaults to `"gelu_new"`):
|
198 |
+
Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
|
199 |
+
resid_pdrop (`float`, *optional*, defaults to 0.1):
|
200 |
+
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
|
201 |
+
embd_pdrop (`float`, *optional*, defaults to 0.1):
|
202 |
+
The dropout ratio for the embeddings.
|
203 |
+
attention_dropout (`float`, *optional*, defaults to 0.1):
|
204 |
+
The dropout ratio for the attention.
|
205 |
+
layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
|
206 |
+
The epsilon to use in the layer normalization layers.
|
207 |
+
initializer_range (`float`, *optional*, defaults to 0.02):
|
208 |
+
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
209 |
+
summary_type (`string`, *optional*, defaults to `"cls_index"`):
|
210 |
+
Argument used when doing sequence summary.
|
211 |
+
|
212 |
+
Has to be one of the following options:
|
213 |
+
|
214 |
+
- `"last"`: Take the last token hidden state (like XLNet).
|
215 |
+
- `"first"`: Take the first token hidden state (like BERT).
|
216 |
+
- `"mean"`: Take the mean of all tokens hidden states.
|
217 |
+
- `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
|
218 |
+
- `"attn"`: Not implemented now, use multi-head attention.
|
219 |
+
summary_use_proj (`bool`, *optional*, defaults to `True`):
|
220 |
+
Whether or not to add a projection after the vector extraction.
|
221 |
+
summary_activation (`str`, *optional*):
|
222 |
+
Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
|
223 |
+
summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
|
224 |
+
Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
|
225 |
+
summary_first_dropout (`float`, *optional*, defaults to 0.1):
|
226 |
+
The dropout ratio to be used after the projection and activation.
|
227 |
+
use_cache (`bool`, *optional*, defaults to `True`):
|
228 |
+
Whether or not the model should return the last key/values attentions (not used by all models).
|
229 |
+
bos_token_id (`int`, *optional*, defaults to 8192):
|
230 |
+
Beginning of sequence token id, used at the start of the generation.
|
231 |
+
eos_token_id (`int`, *optional*, defaults to 8193):
|
232 |
+
End of sequence token id, used in the method
|
233 |
+
[`ClvpModelForConditionalGeneration.fix_speech_decoder_output()`] to correct decoder outputs.
|
234 |
+
feature_size (`int`, *optional*, defaults to 80):
|
235 |
+
The feature dimension of the extracted mel features. This value is used in [`ClvpConditioningEncoder`].
|
236 |
+
        use_attention_bias (`bool`, *optional*, defaults to `True`):
            Whether to use bias in Query, Key and Value layers during self attention.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
            testing).
        decoder_fixing_codes (`list`, *optional*, defaults to `[83, 45, 45, 248]`):
            These values are used in the method `fix_speech_decoder_output` to fix decoder generated outputs.

    Example:

    ```python
    >>> from transformers import ClvpDecoderConfig, ClvpDecoder

    >>> # Initializing a ClvpDecoderConfig with susnato/clvp_dev style configuration
    >>> decoder_configuration = ClvpDecoderConfig()

    >>> # Initializing a ClvpDecoder (with random weights) from the susnato/clvp_dev style configuration
    >>> model = ClvpDecoder(decoder_configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "clvp_decoder"

    def __init__(
        self,
        vocab_size=8194,
        max_position_embeddings=608,
        max_text_tokens=404,
        hidden_size=1024,
        num_hidden_layers=30,
        num_attention_heads=16,
        n_inner=None,
        num_mel_attn_blocks=6,
        activation_function="gelu_new",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attention_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        use_cache=True,
        bos_token_id=8192,
        eos_token_id=8193,
        feature_size=80,
        use_attention_bias=True,
        initializer_factor=1.0,
        decoder_fixing_codes=[83, 45, 45, 248],
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.max_text_tokens = max_text_tokens
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.n_inner = n_inner
        self.num_mel_attn_blocks = num_mel_attn_blocks
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attention_dropout = attention_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        self.use_cache = use_cache
        self.feature_size = feature_size
        self.use_attention_bias = use_attention_bias
        self.initializer_factor = initializer_factor
        self.decoder_fixing_codes = decoder_fixing_codes

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the decoder config dict if we are loading from ClvpConfig
        if config_dict.get("model_type") == "clvp":
            config_dict = config_dict["decoder_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
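
Note on the `from_pretrained` override above: when pointed at a composite CLVP checkpoint it peels the nested `decoder_config` dict out of the full config before building the sub-config. A minimal sketch of that unwrapping, assuming only the classes defined in this file and default config values (no download needed):

```python
from transformers import ClvpConfig, ClvpDecoderConfig

composite = ClvpConfig()  # full composite config (model_type == "clvp")
sub_dict = composite.to_dict()["decoder_config"]  # the nested decoder dict
decoder_cfg = ClvpDecoderConfig.from_dict(sub_dict)  # what the override does after unwrapping
```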
class ClvpConfig(PretrainedConfig):
    r"""
    [`ClvpConfig`] is the configuration class to store the configuration of a [`ClvpModelForConditionalGeneration`]. It
    is used to instantiate a CLVP model according to the specified arguments, defining the text model, speech model and
    decoder model configs. Instantiating a configuration with the defaults will yield a similar configuration to that
    of the CLVP [susnato/clvp_dev](https://huggingface.co/susnato/clvp_dev) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize the CLVP text encoder.
        speech_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize the CLVP speech encoder.
        decoder_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`ClvpDecoderConfig`].
        projection_dim (`int`, *optional*, defaults to 768):
            Dimensionality of the text and speech projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. The default is used as per the original CLVP
            implementation.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
            testing).
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import ClvpConfig, ClvpModelForConditionalGeneration

    >>> # Initializing a ClvpConfig with susnato/clvp_dev style configuration
    >>> configuration = ClvpConfig()

    >>> # Initializing a ClvpModelForConditionalGeneration (with random weights) from the susnato/clvp_dev style configuration
    >>> model = ClvpModelForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a ClvpConfig from two ClvpEncoderConfig (text and speech) and a ClvpDecoderConfig
    >>> from transformers import ClvpEncoderConfig, ClvpDecoderConfig

    >>> # Initializing the text encoder, speech encoder and decoder configurations
    >>> config_text = ClvpEncoderConfig()
    >>> config_speech = ClvpEncoderConfig()
    >>> decoder_config = ClvpDecoderConfig()

    >>> config = ClvpConfig.from_sub_model_configs(config_text, config_speech, decoder_config)
    ```"""

    model_type = "clvp"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        speech_config=None,
        decoder_config=None,
        projection_dim=768,
        logit_scale_init_value=2.6592,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `ClvpEncoderConfig` with default values.")

        if speech_config is None:
            speech_config = {}
            logger.info("`speech_config` is `None`. Initializing the `ClvpEncoderConfig` with default values.")

        if decoder_config is None:
            decoder_config = {}
            logger.info("`decoder_config` is `None`. Initializing the `ClvpDecoderConfig` with default values.")

        self.text_config = ClvpEncoderConfig(**text_config)
        self.speech_config = ClvpEncoderConfig(**speech_config)
        self.decoder_config = ClvpDecoderConfig(**decoder_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = initializer_factor

    @classmethod
    def from_sub_model_configs(
        cls,
        text_config: ClvpEncoderConfig,
        speech_config: ClvpEncoderConfig,
        decoder_config: ClvpDecoderConfig,
        **kwargs,
    ):
        r"""
        Instantiate a [`ClvpConfig`] (or a derived class) from CLVP text model configuration, CLVP speech model
        configuration and CLVP decoder model configuration.

        Args:
            text_config (`ClvpEncoderConfig`):
                Text model configuration of type [`ClvpEncoderConfig`].
            speech_config (`ClvpEncoderConfig`):
                Speech model configuration of type [`ClvpEncoderConfig`].
            decoder_config (`ClvpDecoderConfig`):
                Decoder model configuration of type [`ClvpDecoderConfig`].

        Returns:
            [`ClvpConfig`]: An instance of a configuration object
        """

        return cls(
            text_config=text_config.to_dict(),
            speech_config=speech_config.to_dict(),
            decoder_config=decoder_config.to_dict(),
            **kwargs,
        )
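
A round-trip sketch for the composite config above; the directory path is illustrative. Saving writes a single `config.json` with the three nested sub-dicts, and `ClvpDecoderConfig.from_pretrained` can then pick its sub-config back out of that composite file:

```python
from transformers import ClvpConfig, ClvpDecoderConfig, ClvpEncoderConfig

config = ClvpConfig.from_sub_model_configs(ClvpEncoderConfig(), ClvpEncoderConfig(), ClvpDecoderConfig())
config.save_pretrained("./clvp_config")  # illustrative output directory
decoder_cfg = ClvpDecoderConfig.from_pretrained("./clvp_config")  # extracts the nested "decoder_config"
```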
llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/convert_clvp_to_hf.py
ADDED
@@ -0,0 +1,234 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Weights conversion script for CLVP
"""

import argparse
import os

import torch
from huggingface_hub import hf_hub_download

from transformers import ClvpConfig, ClvpModelForConditionalGeneration


_MODELS = {
    "clvp": "https://huggingface.co/jbetker/tortoise-tts-v2/blob/main/.models/clvp2.pth",
    "decoder": "https://huggingface.co/jbetker/tortoise-tts-v2/blob/main/.models/autoregressive.pth",
}

dim = 1024
sub_dim = dim // 16

CLVP_ENCODERS_MAPPING = {
    "text_transformer.transformer.attn_layers": "text_encoder_model",
    "speech_transformer.transformer.attn_layers": "speech_encoder_model",
    "text_transformer.transformer.norm": "text_encoder_model.final_layer_norm",
    "speech_transformer.transformer.norm": "speech_encoder_model.final_layer_norm",
    "to_text_latent": "text_encoder_model.projection",
    "to_speech_latent": "speech_encoder_model.projection",
    "text_emb": "text_encoder_model.token_embedding",
    "speech_emb": "speech_encoder_model.token_embedding",
    "1.wrap.net.0": "mlp.fc1",
    "1.wrap.net.3": "mlp.fc2",
    "1.wrap": "self_attn",
    "to_out": "out_proj",
    "to_q": "q_proj",
    "to_k": "k_proj",
    "to_v": "v_proj",
    "temperature": "logit_scale",
}

CLVP_DECODER_MAPPING = {
    "conditioning_encoder.init": "conditioning_encoder.mel_conv",
    "conditioning_encoder.attn": "conditioning_encoder.mel_attn_blocks",
    "mel_attn_blocks": "group_norms",
    ".norm.weight": ".weight",
    ".norm.bias": ".bias",
    "text_embedding": "conditioning_encoder.text_token_embedding",
    "text_pos_embedding.emb": "conditioning_encoder.text_position_embedding",
    "final_norm": "speech_decoder_model.final_norm",
    "mel_head": "speech_decoder_model.lm_head",
    "gpt.ln_f": "speech_decoder_model.model.decoder.layer_norm",
    "mel_embedding": "speech_decoder_model.model.decoder.input_embeds_layer",
    "mel_pos_embedding.emb": "speech_decoder_model.model.decoder.position_embeds_layer",
    "gpt.h": "speech_decoder_model.model.decoder.layers",
    "ln_1": "input_layernorm",
    "ln_2": "post_attention_layernorm",
}


def update_index(present_index):
    if present_index % 2 == 0:
        return int(present_index / 2)
    else:
        return int((present_index - 1) / 2)
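
`update_index` folds pairs of consecutive entries in the original layer list onto a single converted layer index (the original checkpoint stores a norm and its wrapped sub-layer at adjacent indices). A quick illustration:

```python
# both members of each consecutive pair map to the same converted index
assert [update_index(i) for i in range(6)] == [0, 0, 1, 1, 2, 2]
```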
def convert_encoder_weights(original_weights):
    converted_weights = {}
    original_weights_keys = sorted(original_weights.keys())
    for original_key in original_weights_keys:
        updated_key = original_key
        # for input_rmsnorm.weight and post_attention_rmsnorm.weight
        if "0.0.g" in updated_key:
            present_index = updated_key.split(".")[4]
            if int(present_index) % 2 == 0:
                updated_key = updated_key.replace("0.0.g", "input_rmsnorm.weight")
            else:
                updated_key = updated_key.replace("0.0.g", "post_attention_rmsnorm.weight")

        if "transformer.attn_layers.layers" in updated_key:
            present_index = updated_key.split(".")[4]
            updated_index = update_index(int(present_index))
            updated_key = updated_key.replace(
                f"transformer.attn_layers.layers.{present_index}", f"transformer.attn_layers.layers.{updated_index}"
            )

        for k, v in CLVP_ENCODERS_MAPPING.items():
            if k in updated_key:
                updated_key = updated_key.replace(k, v)

        converted_weights[updated_key] = original_weights.pop(original_key)

    return converted_weights


def convert_decoder_weights(original_weights):
    converted_weights = {}
    original_weights_keys = sorted(original_weights.keys())
    for original_key in original_weights_keys:
        updated_key = original_key
        if len(updated_key.split(".")) > 3:
            index, attr = updated_key.split(".")[2], updated_key.split(".")[-1]

        # for decoder attention
        if "attn.c_attn" in updated_key:
            if attr == "weight":
                slice1, slice2, slice3 = original_weights[updated_key].squeeze(-1).T.split(split_size=dim, dim=0)
            else:
                slice1, slice2, slice3 = original_weights[updated_key].split(split_size=dim, dim=0)
            converted_weights[f"speech_decoder_model.model.decoder.layers.{index}.attn.q_proj.{attr}"] = slice1
            converted_weights[f"speech_decoder_model.model.decoder.layers.{index}.attn.k_proj.{attr}"] = slice2
            converted_weights[f"speech_decoder_model.model.decoder.layers.{index}.attn.v_proj.{attr}"] = slice3
            continue

        if "attn.c_proj" in updated_key:
            converted_weights[f"speech_decoder_model.model.decoder.layers.{index}.attn.out_proj.{attr}"] = (
                original_weights[updated_key].squeeze(-1).T
            )
            continue

        if "attn.bias" in updated_key or "attn.masked_bias" in updated_key or "text_head" in updated_key:
            original_weights.pop(updated_key)
            continue

        # conditioning encoder attention
        if "qkv" in updated_key:
            if attr == "weight":
                slice1, slice2, slice3 = original_weights[updated_key].squeeze(-1).split(split_size=dim, dim=0)
            else:
                slice1, slice2, slice3 = original_weights[updated_key].split(split_size=dim, dim=0)

            indices = torch.arange(dim)
            index1, index2, index3 = (
                indices.unfold(0, sub_dim, sub_dim * 3).flatten(),
                indices[sub_dim:].unfold(0, sub_dim, sub_dim * 3).flatten(),
                indices[2 * sub_dim :].unfold(0, sub_dim, sub_dim * 3).flatten(),
            )

            converted_weights[f"conditioning_encoder.mel_attn_blocks.{index}.q_proj.{attr}"] = torch.concatenate(
                [slice1[index1], slice2[index3], slice3[index2]],
                axis=0,
            )
            converted_weights[f"conditioning_encoder.mel_attn_blocks.{index}.k_proj.{attr}"] = torch.concatenate(
                [slice1[index2], slice2[index1], slice3[index3]],
                axis=0,
            )
            converted_weights[f"conditioning_encoder.mel_attn_blocks.{index}.v_proj.{attr}"] = torch.concatenate(
                [slice1[index3], slice2[index2], slice3[index1]],
                axis=0,
            )
            continue

        if "proj_out" in updated_key:
            converted_weights[f"conditioning_encoder.mel_attn_blocks.{index}.out_proj.{attr}"] = original_weights[
                updated_key
            ].squeeze(-1)
            continue

        for k, v in CLVP_DECODER_MAPPING.items():
            if k in updated_key:
                updated_key = updated_key.replace(k, v)

        converted_weights[updated_key] = original_weights.pop(original_key)

    return converted_weights


def _download(url: str, root: str):
    repo_id = f"{url.split('/')[3]}/{url.split('/')[4]}"
    filename = f"{url.split('/')[-2]}/{url.split('/')[-1]}"
    hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        force_filename=root,
        local_dir_use_symlinks=False,
    )


def convert_clvp_weights(checkpoint_path, pytorch_dump_folder_path):
    converted_checkpoint = {}

    for each_model_name, each_model_url in _MODELS.items():
        each_model_path = os.path.join(checkpoint_path, each_model_url.split("/")[-1])
        if not os.path.exists(each_model_path):
            print(f"\n{each_model_name} was not found! Downloading it to {each_model_path}")
            _download(url=each_model_url, root=each_model_path)

        if each_model_name == "clvp":
            clvp_checkpoint = torch.load(each_model_path, map_location="cpu")
        else:
            decoder_checkpoint = torch.load(each_model_path, map_location="cpu")

    # Converting the weights
    converted_checkpoint.update(**convert_encoder_weights(clvp_checkpoint))
    converted_checkpoint.update(**convert_decoder_weights(decoder_checkpoint))

    config = ClvpConfig.from_pretrained("susnato/clvp_dev")
    model = ClvpModelForConditionalGeneration(config)

    model.load_state_dict(converted_checkpoint, strict=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Model saved at {pytorch_dump_folder_path}!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_path", type=str, help="Path to the folder of downloaded checkpoints. (Please enter full path)"
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the output PyTorch model. (Please enter full path)",
    )
    args = parser.parse_args()

    convert_clvp_weights(args.checkpoint_path, args.pytorch_dump_folder_path)
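
The converter can also be driven directly from Python rather than through the CLI. A sketch, where both paths are illustrative and the call downloads the original Tortoise checkpoints if they are not already present locally:

```python
from transformers.models.clvp.convert_clvp_to_hf import convert_clvp_weights

convert_clvp_weights(
    checkpoint_path="/path/to/downloaded_checkpoints",  # original .pth files live (or land) here
    pytorch_dump_folder_path="/path/to/clvp_hf",  # converted HF model is written here
)
```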
llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/feature_extraction_clvp.py
ADDED
@@ -0,0 +1,238 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Feature extractor class for CLVP
"""

from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class ClvpFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a CLVP feature extractor.

    This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
    most of the main methods. Users should refer to this superclass for more information regarding those methods.

    This class extracts log-mel-spectrogram features from raw speech using a custom numpy implementation of the `Short
    Time Fourier Transform` which should match pytorch's `torch.stft` equivalent.

    Args:
        feature_size (`int`, *optional*, defaults to 80):
            The feature dimension of the extracted features.
        sampling_rate (`int`, *optional*, defaults to 22050):
            The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
        default_audio_length (`int`, *optional*, defaults to 6):
            The default length of raw audio in seconds. If `max_length` is not set during `__call__` then it will
            automatically be set to default_audio_length * `self.sampling_rate`.
        hop_length (`int`, *optional*, defaults to 256):
            Length of the overlapping windows for the STFT used to obtain the Mel Frequency coefficients.
        chunk_length (`int`, *optional*, defaults to 30):
            The maximum number of chunks of `sampling_rate` samples used to trim and pad longer or shorter audio
            sequences.
        n_fft (`int`, *optional*, defaults to 1024):
            Size of the Fourier transform.
        padding_value (`float`, *optional*, defaults to 0.0):
            Padding value used to pad the audio. Should correspond to silences.
        mel_norms (`list` of length `feature_size`, *optional*):
            If `mel_norms` is provided then it will be used to normalize the log-mel spectrograms along each
            mel-filter.
        return_attention_mask (`bool`, *optional*, defaults to `False`):
            Whether to return the attention mask by default.

            [What are attention masks?](../glossary#attention-mask)
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=22050,
        default_audio_length=6,
        hop_length=256,
        chunk_length=30,
        n_fft=1024,
        padding_value=0.0,
        mel_norms=None,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.default_audio_length = default_audio_length
        self.mel_norms = mel_norms
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + (n_fft // 2),
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="htk",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """
        This method first computes the log-mel spectrogram of the provided audio and then applies normalization along
        each mel-filter bank, if `mel_norms` is provided.
        """
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel=None,
        )

        log_spec = np.log(np.clip(log_spec, a_min=1e-5, a_max=None))

        if self.mel_norms is not None:
            log_spec = log_spec / np.array(self.mel_norms)[:, None]

        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        sampling_rate: Optional[int] = None,
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        """
        `ClvpFeatureExtractor` is used to extract various voice specific properties such as the pitch and tone of the
        voice, speaking speed, and even speaking defects like a lisp or stuttering from a sample voice or `raw_speech`.

        First the voice is padded or truncated in a way such that it becomes a waveform of `self.default_audio_length`
        seconds long and then the log-mel spectrogram is extracted from it.

        Args:
            raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
                The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
                values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
                stereo, i.e. single float per timestep.
            sampling_rate (`int`, *optional*):
                The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
                `sampling_rate` at the forward call to prevent silent errors and to support the automatic speech
                recognition pipeline.
            truncation (`bool`, *optional*, defaults to `True`):
                Activates truncation to cut input sequences longer than *max_length* to *max_length*.
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value.

                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
                `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
            return_attention_mask (`bool`, *optional*, defaults to `True`):
                Whether to return the attention mask. If left to the default, it will return the attention mask.

                [What are attention masks?](../glossary#attention-mask)
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            padding_value (`float`, defaults to 0.0):
                The value that is used to fill the padding values / vectors.
            max_length (`int`, *optional*):
                The maximum input length of the inputs.
        """

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        max_length = self.default_audio_length * self.sampling_rate if max_length is None else max_length

        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [
            self._np_extract_fbank_features(waveform).astype(np.float32) for waveform in input_features[0]
        ]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        return padded_inputs.convert_to_tensors(return_tensors)
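
A minimal usage sketch for the extractor above on a synthetic waveform (the noise input and 3-second length are illustrative; shorter inputs are padded up to `default_audio_length` seconds):

```python
import numpy as np

from transformers import ClvpFeatureExtractor

extractor = ClvpFeatureExtractor()  # defaults: 22050 Hz, 80 mel filters
waveform = np.random.randn(22050 * 3).astype(np.float32)  # 3 s of noise
features = extractor(waveform, sampling_rate=22050, return_tensors="np")
print(features["input_features"].shape)  # (1, 80, num_frames)
```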
llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/modeling_clvp.py
ADDED
@@ -0,0 +1,2022 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" PyTorch CLVP model."""


import copy
import math
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN
from ...generation import GenerationConfig
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPooling,
    CausalLMOutputWithCrossAttentions,
)
from ...modeling_utils import PreTrainedModel, SequenceSummary
from ...pytorch_utils import Conv1D
from ...utils import (
    ModelOutput,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_clvp import (
    ClvpConfig,
    ClvpDecoderConfig,
    ClvpEncoderConfig,
)


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "susnato/clvp_dev"


from ..deprecated._archive_maps import CLVP_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


# Copied from transformers.models.clip.modeling_clip.contrastive_loss
def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))


# Copied from transformers.models.clip.modeling_clip.clip_loss with clip->clvp, image_loss->speech_loss
def clvp_loss(similarity: torch.Tensor) -> torch.Tensor:
    caption_loss = contrastive_loss(similarity)
    speech_loss = contrastive_loss(similarity.t())
    return (caption_loss + speech_loss) / 2.0
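
A standalone illustration of the symmetric contrastive loss above, assuming a toy 4x4 similarity matrix: the targets are the matrix diagonal (matching text/speech pairs), and the loss averages the text-to-speech and speech-to-text directions:

```python
import torch
from torch import nn

similarity = torch.randn(4, 4)  # logits between 4 texts and 4 speech clips
targets = torch.arange(4)  # matching pairs sit on the diagonal
loss = (
    nn.functional.cross_entropy(similarity, targets)
    + nn.functional.cross_entropy(similarity.t(), targets)
) / 2.0
```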
# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, v, cos, sin, position_ids, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query, key and value tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        v (`torch.Tensor`): The value tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`):
            The position indices of the tokens corresponding to the query and key tensors. For example, this can be
            used to pass offset position ids when working with a KV-cache.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query, key and value tensors rotated using the Rotary Position
        Embedding.
    """
    cos = cos[position_ids].unsqueeze(unsqueeze_dim)
    sin = sin[position_ids].unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    v_embed = (v * cos) + (rotate_half(v) * sin)
    return q_embed, k_embed, v_embed
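
A quick sanity sketch for `apply_rotary_pos_emb`, with toy shapes chosen here for illustration: each `(cos, sin)` pair acts as a 2-D rotation on a pair of channels, so per-position vector norms are preserved:

```python
import torch

head_dim, seq_len = 8, 3
inv_freq = 1.0 / (10000 ** (torch.arange(0, head_dim, 2).float() / head_dim))
freqs = torch.einsum("i,j->ij", torch.arange(seq_len).float(), inv_freq)
emb = torch.cat((freqs, freqs), dim=-1)  # (seq_len, head_dim), as built by the model
cos, sin = emb.cos(), emb.sin()

q = torch.randn(1, 1, seq_len, head_dim)  # (batch, heads, seq, head_dim)
position_ids = torch.arange(seq_len)[None, :]  # (batch, seq)
q_rot, _, _ = apply_rotary_pos_emb(q, q, q, cos, sin, position_ids)
torch.testing.assert_close(q.norm(dim=-1), q_rot.norm(dim=-1))  # rotation preserves norms
```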
def _pad_extra_bos_eos_tokens(
    input_ids,
    attention_mask=None,
    pad_token_id=0,
    bos_token_id=255,
    eos_token_id=0,
    add_bos_token=True,
    add_eos_token=True,
):
    """
    This method adds extra bos and eos tokens to input_ids and accordingly modifies the attention_mask which is used in
    `ClvpConditioningEncoder` and the generation loop of the `ClvpModelForConditionalGeneration`.
    """

    # add the bos token at the beginning
    if add_bos_token:
        input_ids = torch.nn.functional.pad(input_ids, (1, 0), value=bos_token_id)
        attention_mask = (
            torch.nn.functional.pad(attention_mask, (1, 0), value=1) if attention_mask is not None else attention_mask
        )

    modified_input_ids = input_ids
    if add_eos_token:
        modified_input_ids = torch.zeros(
            (input_ids.shape[0], input_ids.shape[1] + 1), dtype=input_ids.dtype, device=input_ids.device
        )
        for i, each_input_id in enumerate(input_ids):
            # locate where the valid tokens end and then add the eos token
            if torch.isin(each_input_id, pad_token_id).sum():
                pos = torch.where(each_input_id == pad_token_id)[0].min()
                modified_input_ids[i] = torch.concatenate(
                    [each_input_id[:pos], torch.tensor([eos_token_id], device=input_ids.device), each_input_id[pos:]]
                )
            else:
                # if there are no pad tokens present, then add eos to the end
                modified_input_ids[i] = torch.nn.functional.pad(each_input_id, (0, 1), value=eos_token_id)
        attention_mask = (
            torch.nn.functional.pad(attention_mask, (1, 0), value=1) if attention_mask is not None else attention_mask
        )

    return modified_input_ids, attention_mask
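
A behavior sketch for the helper above on a toy batch, using the function's default ids (pad=0, bos=255, eos=0); note that eos is inserted *before* any trailing padding:

```python
import torch

ids = torch.tensor([[7, 8, 9], [7, 8, 0]])  # second row ends with padding
new_ids, _ = _pad_extra_bos_eos_tokens(ids)
# row 0 -> [255, 7, 8, 9, 0]: bos prepended, eos appended at the end
# row 1 -> [255, 7, 8, 0, 0]: eos inserted just before the existing pad token
```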
@dataclass
class ClvpEncoderOutput(ModelOutput):
    """
    Base class for CLVP encoder's outputs that contains a pooling of the last hidden states as well as a projection
    output (a linear layer on top of the pooled output).

    Args:
        embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
            The embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            The hidden state of the last layer of the model.
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
            Pooled output of the `last_hidden_state`.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of
            the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """

    embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    pooler_output: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class ClvpOutput(ModelOutput):
    """
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Contrastive loss for speech-text similarity.
        speech_ids (`torch.LongTensor`, *optional*):
            speech_ids (or speech candidates) generated by the `ClvpForCausalLM` model.
        logits_per_speech (`torch.FloatTensor` of shape `(speech_batch_size, text_batch_size)`):
            The scaled dot product scores between `speech_embeds` and `text_embeds`. This represents the speech-text
            similarity scores.
        logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, speech_batch_size)`):
            The scaled dot product scores between `text_embeds` and `speech_embeds`. This represents the text-speech
            similarity scores.
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The text embeddings obtained by applying the projection layer to the pooled output of the text encoder
            model.
        speech_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The speech embeddings obtained by applying the projection layer to the pooled output of the speech encoder
            model.
        text_model_output (`BaseModelOutputWithPooling`):
            The pooled output of the `last_hidden_state` of the text encoder Model.
        speech_model_output (`BaseModelOutputWithPooling`):
            The pooled output of the `last_hidden_state` of the speech encoder Model.
        decoder_hidden_states (`torch.FloatTensor`, *optional*):
            The hidden states of the decoder model.
        text_encoder_hidden_states (`torch.FloatTensor`, *optional*):
            The hidden states of the text encoder model.
        speech_encoder_hidden_states (`torch.FloatTensor`, *optional*):
            The hidden states of the speech encoder model.
    """

    loss: Optional[torch.FloatTensor] = None
    speech_ids: Optional[torch.LongTensor] = None
    logits_per_speech: torch.FloatTensor = None
    logits_per_text: torch.FloatTensor = None
    text_embeds: torch.FloatTensor = None
    speech_embeds: torch.FloatTensor = None
    text_model_output: BaseModelOutputWithPooling = None
    speech_model_output: BaseModelOutputWithPooling = None
    decoder_hidden_states: torch.FloatTensor = None
    text_encoder_hidden_states: torch.FloatTensor = None
    speech_encoder_hidden_states: torch.FloatTensor = None


# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Clvp
class ClvpRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        ClvpRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)
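
RMSNorm in one line, matching the `forward` above: the input is scaled by the inverse root-mean-square of its last dimension (no mean subtraction or bias, unlike LayerNorm), then multiplied by a learned weight, which is all ones at initialization:

```python
import torch

x = torch.randn(2, 5, 16)
rms = torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
torch.testing.assert_close(x * rms, ClvpRMSNorm(16)(x))  # identical with the default eps
```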
246 |
+
|
247 |
+
class ClvpRotaryPositionalEmbedding(nn.Module):
|
248 |
+
"""
|
249 |
+
Rotary Position Embedding Class for CLVP. It was proposed in the paper 'ROFORMER: ENHANCED TRANSFORMER WITH ROTARY
|
250 |
+
POSITION EMBEDDING', Please see https://arxiv.org/pdf/2104.09864v1.pdf .
|
251 |
+
"""
|
252 |
+
|
253 |
+
def __init__(self, config):
|
254 |
+
super().__init__()
|
255 |
+
dim = max(config.projection_dim // (config.num_attention_heads * 2), 32)
|
256 |
+
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim))
|
257 |
+
|
258 |
+
self.register_buffer("inv_freq", inv_freq)
|
259 |
+
self.cached_sequence_length = None
|
260 |
+
self.cached_rotary_positional_embedding = None
|
261 |
+
|
262 |
+
def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
|
263 |
+
sequence_length = hidden_states.shape[1]
|
264 |
+
|
265 |
+
if sequence_length == self.cached_sequence_length and self.cached_rotary_positional_embedding is not None:
|
266 |
+
return self.cached_rotary_positional_embedding
|
267 |
+
|
268 |
+
self.cached_sequence_length = sequence_length
|
269 |
+
time_stamps = torch.arange(sequence_length, device=hidden_states.device).type_as(self.inv_freq)
|
270 |
+
freqs = torch.einsum("i,j->ij", time_stamps, self.inv_freq)
|
271 |
+
embeddings = torch.cat((freqs, freqs), dim=-1)
|
272 |
+
|
273 |
+
self.cached_rotary_positional_embedding = embeddings.unsqueeze(0)
|
274 |
+
return self.cached_rotary_positional_embedding
|
275 |
+
|
276 |
+
|
277 |
+
class ClvpSelfAttention(nn.Module):
|
278 |
+
"""
|
279 |
+
Multi-headed attention to combine Absolute and Rotary Positional Embeddings into a single Attention module.
|
280 |
+
"""
|
281 |
+
|
282 |
+
def __init__(self, config):
|
283 |
+
super().__init__()
|
284 |
+
self.config = config
|
285 |
+
self.embed_dim = config.hidden_size
|
286 |
+
self.num_heads = config.num_attention_heads
|
287 |
+
self.head_dim = self.embed_dim // self.num_heads
|
288 |
+
if self.head_dim * self.num_heads != self.embed_dim:
|
289 |
+
raise ValueError(
|
290 |
+
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
|
291 |
+
f" {self.num_heads})."
|
292 |
+
)
|
293 |
+
self.scale = self.head_dim**-0.5
|
294 |
+
self.dropout = config.attention_dropout
|
295 |
+
|
296 |
+
if hasattr(config, "max_position_embeddings"):
|
297 |
+
max_positions = config.max_position_embeddings
|
298 |
+
bias = torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool))
|
299 |
+
bias = bias.view(1, 1, max_positions, max_positions)
|
300 |
+
self.register_buffer("bias", bias, persistent=False)
|
301 |
+
|
302 |
+
        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    # Copied from transformers.models.clip.modeling_clip.CLIPAttention._shape
    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        rotary_pos_emb: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        use_cache: Optional[bool] = False,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
        # Raise error when position_ids is None but rotary_pos_emb is provided, because we need that when applying
        # rotary_pos_emb to query and key states.
        if rotary_pos_emb is not None and position_ids is None:
            raise ValueError("`position_ids` must be provided when `rotary_pos_emb` is not None.")

        bsz, _, embed_dim = hidden_states.size()

        # get query proj
        query_states = self._shape(self.q_proj(hidden_states), -1, bsz) * self.scale
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if past_key_value is not None:
            past_key, past_value = past_key_value
            key_states = torch.cat((past_key, key_states), dim=-2)
            value_states = torch.cat((past_value, value_states), dim=-2)

        if use_cache is True:
            present = (key_states, value_states)
        else:
            present = None

        if rotary_pos_emb is not None:
            rotary_emb_dim = rotary_pos_emb.shape[-1]

            # Partial rotary embedding
            query_rot, query_pass = (
                query_states[..., :rotary_emb_dim],
                query_states[..., rotary_emb_dim:],
            )
            key_rot, key_pass = (
                key_states[..., :rotary_emb_dim],
                key_states[..., rotary_emb_dim:],
            )
            value_rot, value_pass = (
                value_states[..., :rotary_emb_dim],
                value_states[..., rotary_emb_dim:],
            )

            cos, sin = rotary_pos_emb.cos().squeeze(0), rotary_pos_emb.sin().squeeze(0)
            query_rot, key_rot, value_rot = apply_rotary_pos_emb(query_rot, key_rot, value_rot, cos, sin, position_ids)

            # [batch_size, num_heads, seq_length, head_dim]
            query_states = torch.cat((query_rot, query_pass), dim=-1)
            key_states = torch.cat((key_rot, key_pass), dim=-1)
            value_states = torch.cat((value_rot, value_pass), dim=-1)

        tgt_len = query_states.shape[2]
        src_len = key_states.shape[2]
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3))

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.matmul(attn_probs, value_states)

        if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, present, attn_weights


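# A minimal, self-contained sketch of the partial rotary embedding used above: only the first
# `rotary_dim` channels of each head are rotated and the remaining channels pass through
# unchanged. The helper below is a simplified stand-in for `apply_rotary_pos_emb`, not the
# library function itself.
def _partial_rotary_sketch(states: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, rotary_dim: int):
    # states: (batch, num_heads, seq_len, head_dim); cos/sin: (seq_len, rotary_dim)
    rot, passthrough = states[..., :rotary_dim], states[..., rotary_dim:]
    half = rotary_dim // 2
    # standard "rotate half" formulation of rotary position embeddings
    rotated = torch.cat((-rot[..., half:], rot[..., :half]), dim=-1)
    rot = rot * cos + rotated * sin
    return torch.cat((rot, passthrough), dim=-1)

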
class ClvpGatedLinearUnit(nn.Module):
    """
    `ClvpGatedLinearUnit` uses the second half of the `hidden_states` to act as a gate for the first half of the
    `hidden_states`, which controls the flow of data from the first half of the tensor.
    """

    def __init__(self, config):
        super().__init__()
        self.activation_fn = ACT2FN[config.hidden_act]
        self.proj = nn.Linear(config.hidden_size, config.intermediate_size * 2)

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.activation_fn(gate)


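# A worked sketch of the gating above: `proj` maps the last dimension to 2 * intermediate_size,
# and `chunk` splits it into a value half and a gate half; the activated gate then scales the
# value half elementwise. `torch.sigmoid` below stands in for the configured ACT2FN activation.
def _glu_sketch(hidden_states: torch.Tensor, proj: nn.Linear) -> torch.Tensor:
    value, gate = proj(hidden_states).chunk(2, dim=-1)  # two (batch, seq, intermediate_size) halves
    return value * torch.sigmoid(gate)  # gate controls how much of the value half flows through

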
class ClvpEncoderMLP(nn.Module):
    """
    This MLP is used in CLVP speech or text encoder models.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config

        self.fc1 = ClvpGatedLinearUnit(config)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout_layer = nn.Dropout(config.dropout)

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.dropout_layer(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class ClvpEncoderLayer(nn.Module):
    def __init__(self, config: ClvpConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.self_attn = ClvpSelfAttention(config)
        self.mlp = ClvpEncoderMLP(config)

        self.input_rmsnorm = ClvpRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.post_attention_rmsnorm = ClvpRMSNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        rotary_pos_emb: torch.FloatTensor,
        attention_mask: torch.LongTensor,
        position_ids: torch.LongTensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, embed_dim)`):
                input to the layer.
            rotary_pos_emb (`torch.FloatTensor`):
                rotary position embeddings generated by `ClvpRotaryPositionalEmbedding` module.
            attention_mask (`torch.FloatTensor` of shape `(batch, 1, tgt_len, src_len)`):
                attention mask where padding elements are indicated by very large negative values.
            position_ids (`torch.LongTensor`):
                Denotes position ids of the input tokens.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.input_rmsnorm(hidden_states)

        attention_outputs = self.self_attn(
            hidden_states=hidden_states,
            rotary_pos_emb=rotary_pos_emb,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
        )

        hidden_states = attention_outputs[0]

        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.post_attention_rmsnorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attention_outputs[-1],)

        return outputs


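# The encoder layer above follows the usual pre-norm pattern: normalize, transform, then add the
# residual back. A compressed sketch of the same control flow (the norm and sublayer arguments are
# placeholders, not the modules defined in this file):
def _pre_norm_block_sketch(x: torch.Tensor, norm1, attn, norm2, mlp) -> torch.Tensor:
    x = x + attn(norm1(x))  # attention sublayer with residual connection
    x = x + mlp(norm2(x))  # feed-forward sublayer with residual connection
    return x

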
# Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP with GPT2->ClvpDecoderMLP
class ClvpDecoderMLP(nn.Module):
    def __init__(self, intermediate_size, config):
        super().__init__()
        embed_dim = config.hidden_size
        self.c_fc = Conv1D(intermediate_size, embed_dim)
        self.c_proj = Conv1D(embed_dim, intermediate_size)
        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
        hidden_states = self.c_fc(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.c_proj(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states


class ClvpDecoderLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        hidden_size = config.hidden_size
        inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size

        self.input_layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.attn = ClvpSelfAttention(config)
        self.post_attention_layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)

        self.mlp = ClvpDecoderMLP(inner_dim, config)

    def forward(
        self,
        hidden_states: Optional[Tuple[torch.FloatTensor]],
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        attn_outputs = self.attn(
            hidden_states,
            past_key_value=past_key_value,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]
        outputs = attn_outputs[1:]
        # residual connection
        hidden_states = attn_output + residual

        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        feed_forward_hidden_states = self.mlp(hidden_states)
        # residual connection
        hidden_states = residual + feed_forward_hidden_states

        if use_cache:
            outputs = (hidden_states,) + outputs
        else:
            outputs = (hidden_states,) + outputs[1:]

        return outputs


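# How the `past_key_value` cache threaded through the layer above grows during generation: each
# step's new key/value states are appended along the sequence axis (dim=-2), so attention can span
# all previously generated tokens. A minimal sketch with assumed (hypothetical) shapes:
def _kv_cache_sketch(past_key: torch.Tensor, new_key: torch.Tensor) -> torch.Tensor:
    # past_key: (batch, heads, seq_so_far, head_dim), new_key: (batch, heads, 1, head_dim)
    # result:   (batch, heads, seq_so_far + 1, head_dim)
    return torch.cat((past_key, new_key), dim=-2)

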
class ClvpConditioningEncoder(nn.Module):
    """
    This class processes the log-mel spectrograms (extracted by the Feature Extractor) and text tokens (produced by
    the tokenizer) as inputs for the decoder model.

    First, each log-mel spectrogram is processed into a single vector which captures valuable characteristics from
    each of them, then the text tokens are converted into token embeddings and position embeddings are added
    afterwards. Both of these vectors are concatenated and then passed to the decoder model.

    The text tokens help to incorporate the "text information" and the log-mel spectrogram is used to specify the
    "voice characteristics" into the generated mel tokens.
    """

    def __init__(self, config: ClvpConfig):
        super().__init__()

        self.text_config = config.text_config
        self.decoder_config = config.decoder_config

        self.text_token_embedding = nn.Embedding(self.text_config.vocab_size, self.decoder_config.hidden_size)
        self.text_position_embedding = nn.Embedding(
            self.decoder_config.max_text_tokens, self.decoder_config.hidden_size
        )

        self.mel_conv = nn.Conv1d(self.decoder_config.feature_size, self.decoder_config.hidden_size, kernel_size=1)

        # define group norms to be used before each attention layer
        num_groups = self.compute_groupnorm_groups(self.decoder_config.hidden_size)
        self.group_norms = nn.ModuleList(
            [
                nn.GroupNorm(num_groups, self.decoder_config.hidden_size, eps=1e-5, affine=True)
                for _ in range(self.decoder_config.num_mel_attn_blocks)
            ]
        )

        # define the attention layers
        self.mel_attn_blocks = nn.ModuleList(
            [ClvpSelfAttention(self.decoder_config) for _ in range(self.decoder_config.num_mel_attn_blocks)]
        )

        self.gradient_checkpointing = False

    def compute_groupnorm_groups(self, channels: int, groups: int = 32):
        """
        Calculates the value of `num_groups` for nn.GroupNorm. This logic is taken from the official tortoise
        repository. link :
        https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/models/arch_util.py#L26
        """
        if channels <= 16:
            groups = 8
        elif channels <= 64:
            groups = 16
        while channels % groups != 0:
            groups = int(groups / 2)

        if groups <= 2:
            raise ValueError(
                f"Number of groups for the GroupNorm must be greater than 2, but it is {groups}. "
                f"Please consider using a different `hidden_size`."
            )

        return groups

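    # A worked example of the search in `compute_groupnorm_groups` (hypothetical channel counts):
    # for channels=80 the default groups=32 does not divide 80, so it halves to 16, which does;
    # for channels=60 it halves 32 -> 16 -> 8 -> 4 before 4 divides 60 evenly, so 4 is returned.
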
    def forward(
        self,
        input_features: torch.FloatTensor,
        input_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
    ):
        # process text
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.size()
        elif inputs_embeds is not None:
            batch_size, seq_length = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        # construct attention mask if not given
        if attention_mask is None:
            attention_mask = torch.ones([batch_size, seq_length], dtype=torch.long, device=input_ids.device)

        # We add bos and eos input_ids in the modeling file instead of the tokenizer file to keep the logic simple
        # This logic is specific to ClvpConditioningEncoder and not used by other modules.
        input_ids, attention_mask = _pad_extra_bos_eos_tokens(
            input_ids,
            attention_mask,
            bos_token_id=self.text_config.bos_token_id,
            eos_token_id=self.text_config.eos_token_id,
        )

        inputs_embeds = self.text_token_embedding(input_ids)
        position_ids = attention_mask.cumsum(-1) - 1
        position_embeds = self.text_position_embedding(position_ids)
        text_embeds = inputs_embeds + position_embeds

        if self.gradient_checkpointing and self.training:
            # process each log-mel spectrogram into a single vector
            mel_spec = torch.utils.checkpoint.checkpoint(self.mel_conv, input_features)

            for i, mel_attn_block in enumerate(self.mel_attn_blocks):
                residual_mel_spec = mel_spec.transpose(1, 2)

                mel_spec = torch.utils.checkpoint.checkpoint(self.group_norms[i], mel_spec).transpose(1, 2)
                mel_spec = torch.utils.checkpoint.checkpoint(mel_attn_block, mel_spec)[0] + residual_mel_spec
                mel_spec = mel_spec.transpose(1, 2)

        else:
            # process each log-mel spectrogram into a single vector
            mel_spec = self.mel_conv(input_features)

            for i, mel_attn_block in enumerate(self.mel_attn_blocks):
                residual_mel_spec = mel_spec.transpose(1, 2)

                mel_spec = self.group_norms[i](mel_spec).transpose(1, 2)
                mel_spec = mel_attn_block(mel_spec)[0] + residual_mel_spec
                mel_spec = mel_spec.transpose(1, 2)

        mel_spec = mel_spec[:, :, 0]
        mel_spec = mel_spec.unsqueeze(1)

        # repeat if there is either (1 text vs N audios) or (N texts vs 1 audio)
        if text_embeds.shape[0] == 1 and mel_spec.shape[0] != 1:
            text_embeds = text_embeds.repeat(mel_spec.shape[0], 1, 1)
        elif text_embeds.shape[0] != 1 and mel_spec.shape[0] == 1:
            mel_spec = mel_spec.repeat(text_embeds.shape[0], 1, 1)
        # If there are N texts and M audios we will raise an error since the number of texts and audios must be the same.
        elif text_embeds.shape[0] != mel_spec.shape[0]:
            raise ValueError(
                f"The number of texts and number of audios must be the same. "
                f"Found {text_embeds.shape[0]} texts vs {mel_spec.shape[0]} audios"
            )

        return torch.concat([mel_spec, text_embeds], dim=1)


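# For example (hypothetical shapes): text_embeds of shape (1, 20, 1024) paired with a mel_spec of
# shape (3, 1, 1024) is repeated to (3, 20, 1024), and the returned concatenation along dim=1 has
# shape (3, 21, 1024); mismatched batch sizes such as 2 texts vs 3 audios raise the ValueError above.

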
class ClvpPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ClvpConfig
    base_model_prefix = "clvp"
    supports_gradient_checkpointing = True
    _skip_keys_device_placement = "past_key_values"

    def _init_weights(self, module):
        """Initialize the weights"""
        factor = self.config.initializer_factor
        if isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=factor * 0.02)
        elif isinstance(module, (nn.Linear, Conv1D, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=factor * 0.02)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, ClvpEncoderMLP):
            factor = self.config.initializer_factor
            in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
            nn.init.normal_(
                module.fc1.proj.weight if getattr(module.fc1, "proj", None) is not None else module.fc1.weight,
                std=fc_std,
            )
            nn.init.normal_(module.fc2.weight, std=in_proj_std)
        elif isinstance(module, ClvpEncoder):
            config = self.config.text_config if hasattr(self.config, "text_config") else self.config
            factor = config.initializer_factor
            module.projection.weight.data.normal_(mean=0.0, std=factor * (config.hidden_size**-0.5))
        elif isinstance(module, ClvpConditioningEncoder):
            module.mel_conv.weight.data.normal_(mean=0.0, std=factor)
            module.mel_conv.bias.data.zero_()
        elif isinstance(module, ClvpForCausalLM):
            for name, p in module.named_parameters():
                if name == "c_proj.weight":
                    p.data.normal_(
                        mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.num_hidden_layers))
                    )
        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


CLVP_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
    heads, etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general
    usage and behavior.

    Parameters:
        config ([`ClvpConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


CLVP_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, time_dim)`):
            Indicates log mel-spectrogram representations for audio returned by [`ClvpFeatureExtractor`].
        conditioning_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
            inputs_embeds for `ClvpConditioningEncoder`. Can be used in place of `input_ids`.
        text_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
            inputs_embeds for the text encoder model passed in place of `input_ids`.
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding text token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


CLVP_DECODER_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):
            Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
            `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
            their past given to this model should not be passed as `input_ids` as they have already been computed.
        attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for
            `past_key_values`. In other words, the `attention_mask` always has to have the length:
            `len(past_key_values) + len(input_ids)`

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.

            If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
            `past_key_values`).
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


class ClvpEncoder(ClvpPreTrainedModel):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`ClvpEncoderLayer`].

    Args:
        config: ClvpConfig
    """

    def __init__(self, config: ClvpConfig):
        super().__init__(config)

        self.config = config
        self.token_embedding = nn.Embedding(config.vocab_size, config.hidden_size)
        self.rotary_pos_emb = ClvpRotaryPositionalEmbedding(config) if config.use_rotary_embedding else None
        self.layers = nn.ModuleList([ClvpEncoderLayer(config) for _ in range(config.num_hidden_layers)])

        self.sequence_summary = SequenceSummary(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        self.projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.token_embedding

    def set_input_embeddings(self, value):
        self.token_embedding = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
                Indices of input sequence tokens in the vocabulary.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                input embeddings for the model. This bypasses the model's internal embedding lookup matrix.
            attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            position_ids (`torch.LongTensor`, *optional*):
                Denotes the position ids of `input_ids`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            inputs_embeds = self.token_embedding(input_ids)
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        # expand attention_mask and create position_ids if needed
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(input_shape[1], dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0)

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        rotary_pos_emb = self.rotary_pos_emb(inputs_embeds) if self.rotary_pos_emb is not None else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    encoder_layer.__call__,
                    hidden_states,
                    rotary_pos_emb,
                    attention_mask,
                    position_ids,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    rotary_pos_emb,
                    attention_mask,
                    position_ids,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        last_hidden_state = hidden_states
        last_hidden_state = self.final_layer_norm(last_hidden_state)

        # take the mean over axis 1 and get pooled output
        pooled_output = self.sequence_summary(last_hidden_state)

        # apply the projection layer
        embeds = self.projection(pooled_output)

        if not return_dict:
            return tuple(
                v for v in [embeds, last_hidden_state, pooled_output, encoder_states, all_attentions] if v is not None
            )

        return ClvpEncoderOutput(
            embeds=embeds,
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_states,
            attentions=all_attentions,
        )


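# The tail of `ClvpEncoder.forward` reduces the (batch, seq_len, hidden) sequence to a single
# vector per sample and projects it into the shared text/speech embedding space. A minimal sketch
# of that pooling + projection step, assuming the `SequenceSummary` is configured to average over
# the sequence axis (as the comment above states):
def _pool_and_project_sketch(last_hidden_state: torch.Tensor, projection: nn.Linear) -> torch.Tensor:
    pooled = last_hidden_state.mean(dim=1)  # (batch, hidden)
    return projection(pooled)  # (batch, projection_dim)

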
class ClvpDecoder(ClvpPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`ClvpDecoderLayer`].
    """

    def __init__(self, config):
        super().__init__(config)

        self.config = config

        self.input_embeds_layer = nn.Embedding(self.config.vocab_size, self.config.hidden_size)
        self.position_embeds_layer = nn.Embedding(self.config.max_position_embeddings, self.config.hidden_size)

        self.drop = nn.Dropout(self.config.embd_pdrop)
        self.layers = nn.ModuleList([ClvpDecoderLayer(self.config) for _ in range(self.config.num_hidden_layers)])
        self.layer_norm = nn.LayerNorm(self.config.hidden_size, eps=self.config.layer_norm_epsilon)

        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.input_embeds_layer

    def set_input_embeddings(self, new_embeddings):
        self.input_embeds_layer = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.layers[layer].attn.prune_heads(heads)

    @add_start_docstrings_to_model_forward(CLVP_DECODER_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])

        if past_key_values is None:
            past_key_values_length = 0
            past_key_values = tuple([None] * len(self.layers))
        else:
            past_key_values_length = past_key_values[0][0].size(-2)
        if position_ids is None:
            position_ids = torch.arange(
                past_key_values_length, input_shape[-1] + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])

        if inputs_embeds is None:
            inputs_embeds = self.input_embeds_layer(input_ids)
        position_embeds = self.position_embeds_layer(position_ids)
        inputs_embeds = inputs_embeds + position_embeds

        attention_mask = _prepare_4d_causal_attention_mask(
            attention_mask, input_shape, inputs_embeds, past_key_values_length
        )

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x num_attention_heads x N x N
        # head_mask has shape num_hidden_layers x batch x num_attention_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        hidden_states = inputs_embeds

        if token_type_ids is not None:
            token_type_embeds = self.input_embeds_layer(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        hidden_states = self.drop(hidden_states)

        output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, past_key_value) in enumerate(zip(self.layers, past_key_values)):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:
                outputs = torch.utils.checkpoint.checkpoint(
                    block.__call__,
                    hidden_states,
                    None,
                    attention_mask,
                    position_ids,
                    head_mask[i],
                )
            else:
                outputs = block(
                    hidden_states,
                    past_key_value=past_key_value,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    head_mask=head_mask[i],
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )

            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)

            if output_attentions:
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)

        hidden_states = self.layer_norm(hidden_states)

        hidden_states = hidden_states.view(output_shape)

        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
                if v is not None
            )

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


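# A sketch of how `ClvpDecoder.forward` derives default position ids when a KV cache is present:
# positions continue from the cached length instead of restarting at zero, so cached and new
# tokens share one consistent position axis (the toy numbers below are illustrative only).
def _cached_position_ids_sketch(past_length: int, new_tokens: int) -> torch.Tensor:
    # e.g. past_length=4, new_tokens=1 -> tensor([[4]])
    return torch.arange(past_length, past_length + new_tokens, dtype=torch.long).unsqueeze(0)

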
@add_start_docstrings(
    "The bare Clvp decoder model outputting raw hidden-states without any specific head on top.",
    CLVP_START_DOCSTRING,
)
class ClvpModel(ClvpPreTrainedModel):
    def __init__(self, config: ClvpDecoderConfig):
        super().__init__(config)
        self.config = config
        self.decoder = ClvpDecoder(self.config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.decoder.input_embeds_layer

    def set_input_embeddings(self, value):
        self.decoder.input_embeds_layer = value

    def get_decoder(self):
        return self.decoder

    @add_start_docstrings_to_model_forward(CLVP_DECODER_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consist of (dec_features, past_key_value, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if not return_dict:
            return decoder_outputs

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            hidden_states=decoder_outputs.hidden_states,
            attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
        )


@add_start_docstrings(
    "The CLVP decoder model with a language modelling head on top.",
    CLVP_START_DOCSTRING,
)
class ClvpForCausalLM(ClvpPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.config = config
        self.model = ClvpModel(self.config)

        self.final_norm = nn.LayerNorm(self.config.hidden_size)
        self.lm_head = nn.Linear(self.config.hidden_size, self.config.vocab_size, bias=True)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.decoder.input_embeds_layer

    def set_input_embeddings(self, new_embeddings):
        self.model.decoder.input_embeds_layer = new_embeddings

    def _prepare_model_inputs(
        self,
        inputs: Optional[torch.Tensor] = None,
        bos_token_id: Optional[int] = None,
        model_kwargs: Optional[Dict[str, torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, Optional[str], Dict[str, torch.Tensor]]:
        """
        This function extracts the model-specific `inputs` for generation.
        """
        input_name = self.main_input_name

        model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None}

        inputs_kwarg = model_kwargs.pop(input_name, None)
        if inputs_kwarg is not None and inputs is not None:
            raise ValueError(
                f"`inputs`: {inputs} were passed alongside {input_name} which is not allowed. "
                f"Make sure to either pass {inputs} or {input_name}=..."
            )
        elif inputs_kwarg is not None:
            inputs = inputs_kwarg

        if input_name == "input_ids" and "inputs_embeds" in model_kwargs:
            model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation(
                inputs, bos_token_id, model_kwargs=model_kwargs
            )
            inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds"

        # Check whether conditioning_embeds are provided; if yes, concatenate the bos_token_id embedding at the end
        # of the conditioning_embeds. Then we must subtract the position embeddings, because the forward pass will
        # add them again, so we cancel them out here.
        conditioning_embeds = model_kwargs.get("conditioning_embeds", None)

        if conditioning_embeds is not None:
            mel_start_token_embedding = self.model.decoder.input_embeds_layer(
                torch.full(
                    (conditioning_embeds.shape[0], 1),
                    fill_value=self.config.bos_token_id,
                    device=conditioning_embeds.device,
                )
            )
            mel_start_token_embedding += self.model.decoder.position_embeds_layer(
                torch.full((conditioning_embeds.shape[0], 1), fill_value=0, device=conditioning_embeds.device)
            )
            conditioning_embeds = torch.concat([conditioning_embeds, mel_start_token_embedding], dim=1)

            # subtract the position embeddings here
            if "attention_mask" in model_kwargs:
                position_ids = model_kwargs["attention_mask"].long().cumsum(-1) - 1
            else:
                position_ids = torch.arange(
                    0, conditioning_embeds.shape[1], dtype=torch.long, device=conditioning_embeds.device
                )
                position_ids = position_ids.unsqueeze(0).repeat(conditioning_embeds.shape[0], 1)

            model_kwargs["inputs_embeds"] = conditioning_embeds - self.model.decoder.position_embeds_layer(
                position_ids
            )
            model_kwargs["input_ids"] = (
                torch.ones((model_kwargs["inputs_embeds"].shape[0], 1), dtype=torch.long, device=self.device)
                * self.config.bos_token_id
            )

            return model_kwargs["inputs_embeds"], "inputs_embeds", model_kwargs

        inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs)
        return inputs, input_name, model_kwargs

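    # The subtraction in `_prepare_model_inputs` pre-cancels the position embeddings: the decoder's
    # forward pass always adds `position_embeds_layer(position_ids)` to its inputs_embeds, so for
    # conditioning embeddings (which already carry their own positional information) the same
    # embeddings are subtracted here and the forward pass adds them straight back, leaving the
    # conditioning embeddings effectively unchanged.
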
    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, inputs_embeds=None, conditioning_embeds=None, **kwargs
    ):
        input_ids_length = input_ids.shape[-1]
        token_type_ids = kwargs.get("token_type_ids", None)
        # only keep the last token for input_ids if past is defined in kwargs
        if past_key_values:
            past_length = past_key_values[0][0].shape[2]

            # Some generation methods already pass only the last input ID
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                # Default to old behavior: keep only final ID
                remove_prefix_length = input_ids.shape[1] - 1

            input_ids = input_ids[:, remove_prefix_length:]
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -input_ids.shape[1] :]

        attention_mask = kwargs.get("attention_mask", None)
        position_ids = kwargs.get("position_ids", None)

        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)
        else:
            position_ids = None

        if conditioning_embeds is not None and past_key_values is not None:
            position_ids = torch.tensor([input_ids_length], dtype=torch.long, device=input_ids.device)

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "position_ids": position_ids,
                "token_type_ids": token_type_ids,
            }
        )
        return model_inputs

    @add_start_docstrings_to_model_forward(CLVP_DECODER_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to
            `-100` are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        """

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]

        lm_logits = self.final_norm(hidden_states)
        lm_logits = self.lm_head(lm_logits)

        loss = None
        if labels is not None:
            labels = labels.to(lm_logits.device)
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    @staticmethod
    # Copied from transformers.models.gpt2.modeling_gpt2.GPT2LMHeadModel._reorder_cache
    def _reorder_cache(
        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
    ) -> Tuple[Tuple[torch.Tensor]]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        """
        return tuple(
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
            for layer_past in past_key_values
        )


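# A minimal sketch of the causal-LM loss computed in `ClvpForCausalLM.forward`: logits are shifted
# left and labels right so that position t predicts token t+1, then both are flattened for
# `CrossEntropyLoss` (which ignores positions labelled -100).
def _shifted_lm_loss_sketch(lm_logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    shift_logits = lm_logits[..., :-1, :].contiguous()  # drop the last position's prediction
    shift_labels = labels[..., 1:].contiguous()  # drop the first token as a target
    loss_fct = CrossEntropyLoss()
    return loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

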
@add_start_docstrings(
    "The composite CLVP model with a text encoder, speech encoder and speech decoder model. "
    "The speech decoder model generates the speech_ids from the text, and the text encoder and speech encoder work "
    "together to filter out the best speech_ids.",
    CLVP_START_DOCSTRING,
)
class ClvpModelForConditionalGeneration(ClvpPreTrainedModel):
    config_class = ClvpConfig

    def __init__(self, config: ClvpConfig):
        super().__init__(config)

        if not isinstance(config.text_config, ClvpEncoderConfig):
            raise ValueError(
                "config.text_config is expected to be of type `ClvpEncoderConfig` but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.speech_config, ClvpEncoderConfig):
            raise ValueError(
                "config.speech_config is expected to be of type `ClvpEncoderConfig` but is of type"
                f" {type(config.speech_config)}."
            )

        if not isinstance(config.decoder_config, ClvpDecoderConfig):
            raise ValueError(
                "config.decoder_config is expected to be of type `ClvpDecoderConfig` but is of type"
                f" {type(config.decoder_config)}."
            )

        self.conditioning_encoder = ClvpConditioningEncoder(config)

        self.speech_decoder_model = ClvpForCausalLM(config.decoder_config)

        self.text_encoder_model = ClvpEncoder(config.text_config)
        self.speech_encoder_model = ClvpEncoder(config.speech_config)

        self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))

        # Initialize weights and apply final processing
        self.post_init()

    # taken from the original repo,
    # link : https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/api.py#L117
    def fix_speech_decoder_output(self, speech_ids: torch.LongTensor) -> torch.LongTensor:
        """
        This method modifies the output of the decoder model, such as replacing the `eos_token_id` and changing the
        last few tokens of each sequence.

        Args:
            speech_ids (`torch.LongTensor`):
                This refers to the output of the decoder model.
        """
        decoder_fixing_codes = self.config.decoder_config.decoder_fixing_codes
        speech_ids = speech_ids[:, 1:]

        stop_token_indices = torch.where(speech_ids == self.speech_decoder_model.config.eos_token_id, 1, 0)
        speech_ids = torch.masked_fill(speech_ids, mask=stop_token_indices.bool(), value=decoder_fixing_codes[0])

        for i, each_seq_stop_token_index in enumerate(stop_token_indices):
            # This means that no stop tokens were found so the sentence was still being generated, in that case we
            # don't need to apply any padding so just skip to the next sequence of tokens.
            if each_seq_stop_token_index.sum() == 0:
                continue

            stm = each_seq_stop_token_index.argmax()
            speech_ids[i, stm:] = decoder_fixing_codes[0]
            if stm - 3 < speech_ids.shape[1]:
                speech_ids[i, -3:] = torch.tensor(
                    [decoder_fixing_codes[1:]], device=speech_ids.device, dtype=torch.long
                )

        return speech_ids

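    # For example (hypothetical token ids): with eos_token_id=8193 and
    # decoder_fixing_codes=[83, 45, 45, 248], a generated row ending in `[..., 511, 8193, 8193]`
    # has everything from the first stop token onwards replaced with 83, and its final three
    # positions overwritten with 45, 45, 248.
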
    def get_text_features(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        text_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
    ) -> torch.FloatTensor:
        r"""
        This method can be used to extract text_embeds from a text. The text embeddings are obtained by applying the
        projection layer to the pooled output of the CLVP text encoder model.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                [What are input IDs?](../glossary#input-ids)
            text_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
                inputs_embeds for the text encoder model passed in place of `input_ids`.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)

        Returns:
            `torch.FloatTensor` of shape `(batch_size, output_dim)`:
                The text embeddings obtained by applying the projection layer to the pooled output of the CLVP Text
                Model.

        Examples:

        ```python
        >>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration

        >>> # Define the Text
        >>> text = "This is an example text."

        >>> # Define processor and model
        >>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
        >>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")

        >>> # Generate processor output and text embeds
        >>> processor_output = processor(text=text, return_tensors="pt")
        >>> text_embeds = model.get_text_features(input_ids=processor_output["input_ids"])
        ```
        """

        outputs = self.text_encoder_model(
            input_ids=input_ids,
            inputs_embeds=text_encoder_inputs_embeds,
            attention_mask=attention_mask,
        )

        return outputs[0]

    def get_speech_features(
        self,
        speech_ids: Optional[torch.LongTensor] = None,
        input_ids: Optional[torch.LongTensor] = None,
        input_features: Optional[torch.FloatTensor] = None,
        conditioning_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        **kwargs,
    ) -> torch.FloatTensor:
        r"""
        This method can be used to extract speech_embeds. The speech embeddings are obtained by applying the speech
        model on speech_ids. If speech_ids is not present but both input_ids and input_features are given, then the
        decoder model will be used to first generate the speech_ids and the speech model will then be applied.

        Args:
            speech_ids (`torch.LongTensor` of shape `(batch_size, num_speech_ids)`, *optional*):
                Speech Tokens. Padding will be ignored by default should you provide it. If speech_ids are provided
                then input_ids and input_features will be automatically ignored.
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Input text Tokens. Processed from the [`ClvpTokenizer`]. If speech_ids is not provided, then input_ids
                and input_features will be used.
            input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, time_dim)`, *optional*):
|
1661 |
+
Indicates log-melspectrogram representations for audio returned by [`ClvpFeatureExtractor`]. If
|
1662 |
+
speech_ids is not provided, then input_ids and input_features will be used.
|
1663 |
+
conditioning_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
|
1664 |
+
inputs_embeds for `ClvpConditioningEncoder`. Can be used in place of `input_ids`.
|
1665 |
+
attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
1666 |
+
Mask to avoid performing attention on padding speech token indices. Mask values selected in `[0, 1]`:
|
1667 |
+
|
1668 |
+
- 1 for tokens that are **not masked**,
|
1669 |
+
- 0 for tokens that are **masked**.
|
1670 |
+
|
1671 |
+
[What are attention masks?](../glossary#attention-mask)
|
1672 |
+
generation_config (`GenerationConfig`, *optional*):
|
1673 |
+
generation config to control the generation of speech_ids if they are not provided.
|
1674 |
+
|
1675 |
+
Returns:
|
1676 |
+
`torch.FloatTensor` of shape `(batch_size, output_dim)`:
|
1677 |
+
The speech embeddings obtained by applying the projection layer to the pooled output of the CLVP Speech
|
1678 |
+
Model.
|
1679 |
+
|
1680 |
+
Examples:
|
1681 |
+
|
1682 |
+
```python
|
1683 |
+
>>> import datasets
|
1684 |
+
>>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration
|
1685 |
+
|
1686 |
+
>>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library)
|
1687 |
+
>>> text = "This is an example text."
|
1688 |
+
>>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
|
1689 |
+
>>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
|
1690 |
+
>>> _, audio, sr = ds.sort("id").select(range(1))[:1]["audio"][0].values()
|
1691 |
+
|
1692 |
+
>>> # Define processor and model
|
1693 |
+
>>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
|
1694 |
+
>>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")
|
1695 |
+
|
1696 |
+
>>> # Generate processor output and model output
|
1697 |
+
>>> processor_output = processor(raw_speech=audio, sampling_rate=sr, text=text, return_tensors="pt")
|
1698 |
+
>>> speech_embeds = model.get_speech_features(
|
1699 |
+
... input_ids=processor_output["input_ids"], input_features=processor_output["input_features"]
|
1700 |
+
... )
|
1701 |
+
```
|
1702 |
+
"""
|
1703 |
+
|
1704 |
+
if speech_ids is None:
|
1705 |
+
if (input_ids is None and conditioning_encoder_inputs_embeds is None) or input_features is None:
|
1706 |
+
raise ValueError(
|
1707 |
+
"Either speech_ids or input_ids/conditioning_encoder_inputs_embeds and input_features must be provided."
|
1708 |
+
)
|
1709 |
+
|
1710 |
+
if generation_config is None:
|
1711 |
+
generation_config = self.generation_config
|
1712 |
+
generation_config.update(**kwargs)
|
1713 |
+
|
1714 |
+
conditioning_embeds = self.conditioning_encoder(
|
1715 |
+
input_features=input_features,
|
1716 |
+
input_ids=input_ids,
|
1717 |
+
inputs_embeds=conditioning_encoder_inputs_embeds,
|
1718 |
+
attention_mask=attention_mask,
|
1719 |
+
)
|
1720 |
+
|
1721 |
+
speech_ids = self.speech_decoder_model.generate(
|
1722 |
+
conditioning_embeds=conditioning_embeds,
|
1723 |
+
generation_config=generation_config,
|
1724 |
+
)
|
1725 |
+
|
1726 |
+
speech_ids = self.fix_speech_decoder_output(speech_ids[0])
|
1727 |
+
|
1728 |
+
outputs = self.speech_encoder_model(
|
1729 |
+
input_ids=speech_ids,
|
1730 |
+
attention_mask=attention_mask,
|
1731 |
+
)
|
1732 |
+
|
1733 |
+
return outputs[0]
|
1734 |
+
|
1735 |
+
@add_start_docstrings_to_model_forward(CLVP_INPUTS_DOCSTRING)
|
1736 |
+
@replace_return_docstrings(output_type=ClvpOutput, config_class=ClvpConfig)
|
1737 |
+
def forward(
|
1738 |
+
self,
|
1739 |
+
input_ids: torch.LongTensor = None,
|
1740 |
+
input_features: torch.FloatTensor = None,
|
1741 |
+
conditioning_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
|
1742 |
+
text_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
|
1743 |
+
attention_mask: Optional[torch.LongTensor] = None,
|
1744 |
+
return_loss: Optional[bool] = None,
|
1745 |
+
output_hidden_states: Optional[bool] = None,
|
1746 |
+
output_attentions: Optional[bool] = False,
|
1747 |
+
return_dict: Optional[bool] = None,
|
1748 |
+
) -> Union[Tuple, ClvpOutput]:
|
1749 |
+
r"""
|
1750 |
+
Returns:
|
1751 |
+
|
1752 |
+
Examples:
|
1753 |
+
|
1754 |
+
```python
|
1755 |
+
>>> import datasets
|
1756 |
+
>>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration
|
1757 |
+
|
1758 |
+
>>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library)
|
1759 |
+
>>> text = "This is an example text."
|
1760 |
+
|
1761 |
+
>>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
|
1762 |
+
>>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
|
1763 |
+
>>> _, audio, sr = ds.sort("id").select(range(1))[:1]["audio"][0].values()
|
1764 |
+
|
1765 |
+
>>> # Define processor and model
|
1766 |
+
>>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
|
1767 |
+
>>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")
|
1768 |
+
|
1769 |
+
>>> # processor outputs and model outputs
|
1770 |
+
>>> processor_output = processor(raw_speech=audio, sampling_rate=sr, text=text, return_tensors="pt")
|
1771 |
+
>>> outputs = model(
|
1772 |
+
... input_ids=processor_output["input_ids"],
|
1773 |
+
... input_features=processor_output["input_features"],
|
1774 |
+
... return_dict=True,
|
1775 |
+
... )
|
1776 |
+
```
|
1777 |
+
"""
|
1778 |
+
|
1779 |
+
# Use CLVP model's config for some fields (if specified) instead of those of speech & text components.
|
1780 |
+
output_hidden_states = (
|
1781 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1782 |
+
)
|
1783 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1784 |
+
|
1785 |
+
conditioning_embeds = self.conditioning_encoder(
|
1786 |
+
input_features=input_features,
|
1787 |
+
input_ids=input_ids,
|
1788 |
+
inputs_embeds=conditioning_encoder_inputs_embeds,
|
1789 |
+
attention_mask=attention_mask,
|
1790 |
+
)
|
1791 |
+
|
1792 |
+
decoder_outputs = self.speech_decoder_model(
|
1793 |
+
inputs_embeds=conditioning_embeds,
|
1794 |
+
output_hidden_states=output_hidden_states,
|
1795 |
+
return_dict=return_dict,
|
1796 |
+
)
|
1797 |
+
|
1798 |
+
speech_ids = decoder_outputs[0]
|
1799 |
+
|
1800 |
+
# since we will get the embeds of shape `(batch_size, seq_len, embedding_dim)` during the forward pass
|
1801 |
+
# we must convert it to tokens, to make it compaitable with speech_transformer
|
1802 |
+
if speech_ids.ndim == 3:
|
1803 |
+
speech_ids = speech_ids.argmax(2)
|
1804 |
+
speech_ids = self.fix_speech_decoder_output(speech_ids)
|
1805 |
+
|
1806 |
+
speech_outputs = self.speech_encoder_model(
|
1807 |
+
input_ids=speech_ids,
|
1808 |
+
output_hidden_states=output_hidden_states,
|
1809 |
+
return_dict=return_dict,
|
1810 |
+
)
|
1811 |
+
|
1812 |
+
text_outputs = self.text_encoder_model(
|
1813 |
+
input_ids=input_ids,
|
1814 |
+
inputs_embeds=text_encoder_inputs_embeds,
|
1815 |
+
attention_mask=attention_mask,
|
1816 |
+
output_hidden_states=output_hidden_states,
|
1817 |
+
return_dict=return_dict,
|
1818 |
+
)
|
1819 |
+
|
1820 |
+
speech_embeds = speech_outputs[0]
|
1821 |
+
text_embeds = text_outputs[0]
|
1822 |
+
|
1823 |
+
# normalized features
|
1824 |
+
speech_embeds = speech_embeds / speech_embeds.norm(p=2, dim=-1, keepdim=True)
|
1825 |
+
text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
|
1826 |
+
|
1827 |
+
# cosine similarity as logits
|
1828 |
+
logit_scale = self.logit_scale.exp()
|
1829 |
+
logits_per_text = torch.matmul(text_embeds, speech_embeds.t()) * logit_scale
|
1830 |
+
logits_per_speech = logits_per_text.t()
|
1831 |
+
|
1832 |
+
loss = None
|
1833 |
+
if return_loss:
|
1834 |
+
loss = clvp_loss(logits_per_text)
|
1835 |
+
|
1836 |
+
if not return_dict:
|
1837 |
+
output = (
|
1838 |
+
logits_per_speech,
|
1839 |
+
logits_per_text,
|
1840 |
+
text_embeds,
|
1841 |
+
speech_embeds,
|
1842 |
+
text_outputs[2],
|
1843 |
+
speech_outputs[2],
|
1844 |
+
)
|
1845 |
+
if output_hidden_states:
|
1846 |
+
output += (
|
1847 |
+
decoder_outputs[-1],
|
1848 |
+
text_outputs[-1],
|
1849 |
+
speech_outputs[-1],
|
1850 |
+
)
|
1851 |
+
|
1852 |
+
return ((loss,) + output) if loss is not None else output
|
1853 |
+
|
1854 |
+
return ClvpOutput(
|
1855 |
+
loss=loss,
|
1856 |
+
logits_per_speech=logits_per_speech,
|
1857 |
+
logits_per_text=logits_per_text,
|
1858 |
+
text_embeds=text_embeds,
|
1859 |
+
speech_embeds=speech_embeds,
|
1860 |
+
text_model_output=text_outputs[2],
|
1861 |
+
speech_model_output=speech_outputs[2],
|
1862 |
+
decoder_hidden_states=decoder_outputs.hidden_states,
|
1863 |
+
text_encoder_hidden_states=text_outputs.hidden_states,
|
1864 |
+
speech_encoder_hidden_states=speech_outputs.hidden_states,
|
1865 |
+
)
|
1866 |
+
|
1867 |
+
@torch.no_grad()
|
1868 |
+
def generate(
|
1869 |
+
self,
|
1870 |
+
input_ids: torch.LongTensor = None,
|
1871 |
+
input_features: torch.FloatTensor = None,
|
1872 |
+
attention_mask: Optional[torch.LongTensor] = None,
|
1873 |
+
generation_config: Optional[GenerationConfig] = None,
|
1874 |
+
pad_to_max_mel_tokens: Optional[int] = None,
|
1875 |
+
output_hidden_states: Optional[bool] = None,
|
1876 |
+
**kwargs,
|
1877 |
+
):
|
1878 |
+
"""
|
1879 |
+
Generate method for `ClvpModelForConditionalGeneration`, this method calls the `generate` method of
|
1880 |
+
`ClvpForCausalLM` and then uses those generated `speech_ids` to process `text_embeds` and `speech_embeds` using
|
1881 |
+
`ClvpEncoder`.
|
1882 |
+
|
1883 |
+
Args:
|
1884 |
+
input_ids (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
1885 |
+
Input text Tokens. Processed from the [`ClvpTokenizer`].
|
1886 |
+
input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, time_dim)`, *optional*):
|
1887 |
+
Indicates log-melspectrogram representations for audio returned by [`ClvpFeatureExtractor`].
|
1888 |
+
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
1889 |
+
Mask to avoid performing attention on padding text token indices. Mask values selected in `[0, 1]`:
|
1890 |
+
|
1891 |
+
- 1 for tokens that are **not masked**,
|
1892 |
+
- 0 for tokens that are **masked**.
|
1893 |
+
|
1894 |
+
[What are attention masks?](../glossary#attention-mask)
|
1895 |
+
generation_config (`~generation.GenerationConfig`, *optional*):
|
1896 |
+
The generation configuration to be used as base parametrization for the generation call. `**kwargs`
|
1897 |
+
passed to generate matching the attributes of `generation_config` will override them. If
|
1898 |
+
`generation_config` is not provided, the default will be used, which had the following loading
|
1899 |
+
priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
|
1900 |
+
configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
|
1901 |
+
default values, whose documentation should be checked to parameterize generation.
|
1902 |
+
pad_to_max_mel_tokens (`int`, *optional*):
|
1903 |
+
Pads generated speech_ids to the specified value. This is to implement the same logic from the official
|
1904 |
+
repo, link: https://github.com/neonbjb/tortoise-tts/blob/80f89987a5abda5e2b082618cd74f9c7411141dc/tortoise/api.py#L430
|
1905 |
+
and to make sure the logits are same.
|
1906 |
+
This does not affect generation quality so please don't consider using it since it is less efficient.
|
1907 |
+
output_hidden_states (`bool`, *optional*):
|
1908 |
+
Whether or not to return the hidden states of decoder model, text encoder and speech encoder models.
|
1909 |
+
|
1910 |
+
Returns:
|
1911 |
+
`ClvpOutput` or tuple: A `ClvpOutput` (if `return_dict_in_generate=True` or when
|
1912 |
+
`config.return_dict_in_generate=True`) or a tuple.
|
1913 |
+
"""
|
1914 |
+
|
1915 |
+
# If the input sequences are larger than (self.config.decoder_config.max_text_tokens - 3) then raise error,
|
1916 |
+
# because we need to add 3 tokens ( 1 bos tokens and 2 eos tokens) to the input_ids in ClvpConditioningEncoder to
|
1917 |
+
# properly sample
|
1918 |
+
sequence_length = input_ids.shape[-1]
|
1919 |
+
if sequence_length > (self.config.decoder_config.max_text_tokens - 3):
|
1920 |
+
raise ValueError(
|
1921 |
+
f"Maximum sequence length reached! Found input_ids of length {sequence_length}."
|
1922 |
+
f"Please make sure that the maximum length of input_ids is {self.config.decoder_config.max_text_tokens - 3}"
|
1923 |
+
)
|
1924 |
+
|
1925 |
+
if generation_config is None:
|
1926 |
+
generation_config = self.generation_config
|
1927 |
+
|
1928 |
+
generation_config = copy.deepcopy(generation_config)
|
1929 |
+
model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
|
1930 |
+
generation_config.validate()
|
1931 |
+
self._validate_model_kwargs(model_kwargs.copy())
|
1932 |
+
|
1933 |
+
# pad input_ids as specified in the original repo
|
1934 |
+
# link: https://github.com/neonbjb/tortoise-tts/blob/80f89987a5abda5e2b082618cd74f9c7411141dc/tortoise/api.py#L380
|
1935 |
+
input_ids, attention_mask = _pad_extra_bos_eos_tokens(
|
1936 |
+
input_ids,
|
1937 |
+
attention_mask,
|
1938 |
+
add_bos_token=False,
|
1939 |
+
bos_token_id=self.config.text_config.bos_token_id,
|
1940 |
+
eos_token_id=self.config.text_config.eos_token_id,
|
1941 |
+
)
|
1942 |
+
|
1943 |
+
conditioning_embeds = self.conditioning_encoder(
|
1944 |
+
input_features=input_features,
|
1945 |
+
input_ids=input_ids,
|
1946 |
+
attention_mask=attention_mask,
|
1947 |
+
)
|
1948 |
+
|
1949 |
+
decoder_outputs = self.speech_decoder_model.generate(
|
1950 |
+
conditioning_embeds=conditioning_embeds,
|
1951 |
+
generation_config=generation_config,
|
1952 |
+
output_hidden_states=output_hidden_states,
|
1953 |
+
return_dict=generation_config.return_dict_in_generate,
|
1954 |
+
)
|
1955 |
+
if isinstance(decoder_outputs, ModelOutput):
|
1956 |
+
speech_ids = decoder_outputs.sequences
|
1957 |
+
|
1958 |
+
# pad to pad_to_max_mel_tokens if given, to replicate the original repo logic
|
1959 |
+
# link: https://github.com/neonbjb/tortoise-tts/blob/80f89987a5abda5e2b082618cd74f9c7411141dc/tortoise/api.py#L430
|
1960 |
+
if pad_to_max_mel_tokens is not None:
|
1961 |
+
padding_needed = pad_to_max_mel_tokens - speech_ids.shape[-1]
|
1962 |
+
speech_ids = torch.nn.functional.pad(
|
1963 |
+
speech_ids, (0, padding_needed), value=self.generation_config.eos_token_id
|
1964 |
+
)
|
1965 |
+
|
1966 |
+
speech_ids = self.fix_speech_decoder_output(speech_ids)
|
1967 |
+
|
1968 |
+
speech_outputs = self.speech_encoder_model(
|
1969 |
+
input_ids=speech_ids,
|
1970 |
+
output_hidden_states=output_hidden_states,
|
1971 |
+
return_dict=generation_config.return_dict_in_generate,
|
1972 |
+
)
|
1973 |
+
text_outputs = self.text_encoder_model(
|
1974 |
+
input_ids=input_ids,
|
1975 |
+
attention_mask=attention_mask,
|
1976 |
+
output_hidden_states=output_hidden_states,
|
1977 |
+
return_dict=generation_config.return_dict_in_generate,
|
1978 |
+
)
|
1979 |
+
|
1980 |
+
speech_embeds = speech_outputs[0]
|
1981 |
+
text_embeds = text_outputs[0]
|
1982 |
+
|
1983 |
+
# normalized features
|
1984 |
+
speech_embeds = speech_embeds / speech_embeds.norm(p=2, dim=-1, keepdim=True)
|
1985 |
+
text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
|
1986 |
+
|
1987 |
+
# cosine similarity as logits
|
1988 |
+
logit_scale = self.logit_scale.exp()
|
1989 |
+
logits_per_text = torch.matmul(text_embeds, speech_embeds.t()) * logit_scale
|
1990 |
+
logits_per_speech = logits_per_text.t()
|
1991 |
+
|
1992 |
+
if not generation_config.return_dict_in_generate:
|
1993 |
+
output = (
|
1994 |
+
speech_ids,
|
1995 |
+
logits_per_speech,
|
1996 |
+
logits_per_text,
|
1997 |
+
text_embeds,
|
1998 |
+
speech_embeds,
|
1999 |
+
text_outputs[2],
|
2000 |
+
speech_outputs[2],
|
2001 |
+
)
|
2002 |
+
if output_hidden_states:
|
2003 |
+
output += (
|
2004 |
+
decoder_outputs[-1],
|
2005 |
+
text_outputs[-1],
|
2006 |
+
speech_outputs[-1],
|
2007 |
+
)
|
2008 |
+
|
2009 |
+
return output
|
2010 |
+
|
2011 |
+
return ClvpOutput(
|
2012 |
+
speech_ids=speech_ids,
|
2013 |
+
logits_per_speech=logits_per_speech,
|
2014 |
+
logits_per_text=logits_per_text,
|
2015 |
+
text_embeds=text_embeds,
|
2016 |
+
speech_embeds=speech_embeds,
|
2017 |
+
text_model_output=text_outputs[2],
|
2018 |
+
speech_model_output=speech_outputs[2],
|
2019 |
+
decoder_hidden_states=decoder_outputs.hidden_states,
|
2020 |
+
text_encoder_hidden_states=text_outputs.hidden_states,
|
2021 |
+
speech_encoder_hidden_states=speech_outputs.hidden_states,
|
2022 |
+
)
|
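For orientation, a minimal end-to-end sketch (not part of the diff) of the `generate` path defined above. It reuses the `susnato/clvp_dev` checkpoint and the LibriSpeech dummy clip from this file's docstring examples, and assumes `datasets` and `torch` are installed.

```python
import datasets

from transformers import ClvpModelForConditionalGeneration, ClvpProcessor

# same text/audio setup as the docstring examples in this file
text = "This is an example text."
ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
_, audio, sr = ds.sort("id").select(range(1))[:1]["audio"][0].values()

processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")

inputs = processor(raw_speech=audio, sampling_rate=sr, text=text, return_tensors="pt")

# `generate` runs the conditioning encoder, the causal speech decoder, then both
# CLVP encoders; `return_dict_in_generate=True` yields a `ClvpOutput`
outputs = model.generate(
    input_ids=inputs["input_ids"],
    input_features=inputs["input_features"],
    return_dict_in_generate=True,
)
print(outputs.speech_ids.shape, outputs.logits_per_text.shape)
```

The `logits_per_text` similarities are what the model exposes for ranking generated speech candidates against the input text.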
llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/number_normalizer.py
ADDED
@@ -0,0 +1,238 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""English Normalizer class for CLVP."""


import re


class EnglishNormalizer:
    def __init__(self):
        # List of (regular expression, replacement) pairs for abbreviations:
        self._abbreviations = [
            (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1])
            for x in [
                ("mrs", "misess"),
                ("mr", "mister"),
                ("dr", "doctor"),
                ("st", "saint"),
                ("co", "company"),
                ("jr", "junior"),
                ("maj", "major"),
                ("gen", "general"),
                ("drs", "doctors"),
                ("rev", "reverend"),
                ("lt", "lieutenant"),
                ("hon", "honorable"),
                ("sgt", "sergeant"),
                ("capt", "captain"),
                ("esq", "esquire"),
                ("ltd", "limited"),
                ("col", "colonel"),
                ("ft", "fort"),
            ]
        ]

        self.ones = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
        self.teens = [
            "ten",
            "eleven",
            "twelve",
            "thirteen",
            "fourteen",
            "fifteen",
            "sixteen",
            "seventeen",
            "eighteen",
            "nineteen",
        ]
        self.tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]

    def number_to_words(self, num: int) -> str:
        """
        Converts numbers (`int`) to words (`str`).

        Please note that it only supports up to "'nine hundred ninety-nine quadrillion, nine hundred ninety-nine
        trillion, nine hundred ninety-nine billion, nine hundred ninety-nine million, nine hundred ninety-nine
        thousand, nine hundred ninety-nine'" or `number_to_words(999_999_999_999_999_999)`.
        """
        if num == 0:
            return "zero"
        elif num < 0:
            return "minus " + self.number_to_words(abs(num))
        elif num < 10:
            return self.ones[num]
        elif num < 20:
            return self.teens[num - 10]
        elif num < 100:
            return self.tens[num // 10] + ("-" + self.number_to_words(num % 10) if num % 10 != 0 else "")
        elif num < 1000:
            return (
                self.ones[num // 100] + " hundred" + (" " + self.number_to_words(num % 100) if num % 100 != 0 else "")
            )
        elif num < 1_000_000:
            return (
                self.number_to_words(num // 1000)
                + " thousand"
                + (", " + self.number_to_words(num % 1000) if num % 1000 != 0 else "")
            )
        elif num < 1_000_000_000:
            return (
                self.number_to_words(num // 1_000_000)
                + " million"
                + (", " + self.number_to_words(num % 1_000_000) if num % 1_000_000 != 0 else "")
            )
        elif num < 1_000_000_000_000:
            return (
                self.number_to_words(num // 1_000_000_000)
                + " billion"
                + (", " + self.number_to_words(num % 1_000_000_000) if num % 1_000_000_000 != 0 else "")
            )
        elif num < 1_000_000_000_000_000:
            return (
                self.number_to_words(num // 1_000_000_000_000)
                + " trillion"
                + (", " + self.number_to_words(num % 1_000_000_000_000) if num % 1_000_000_000_000 != 0 else "")
            )
        elif num < 1_000_000_000_000_000_000:
            return (
                self.number_to_words(num // 1_000_000_000_000_000)
                + " quadrillion"
                + (
                    ", " + self.number_to_words(num % 1_000_000_000_000_000)
                    if num % 1_000_000_000_000_000 != 0
                    else ""
                )
            )
        else:
            return "number out of range"

    def convert_to_ascii(self, text: str) -> str:
        """
        Converts unicode to ascii
        """
        return text.encode("ascii", "ignore").decode("utf-8")

    def _expand_dollars(self, m: str) -> str:
        """
        This method is used to expand numerical dollar values into spoken words.
        """
        match = m.group(1)
        parts = match.split(".")
        if len(parts) > 2:
            return match + " dollars"  # Unexpected format

        dollars = int(parts[0]) if parts[0] else 0
        cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
        if dollars and cents:
            dollar_unit = "dollar" if dollars == 1 else "dollars"
            cent_unit = "cent" if cents == 1 else "cents"
            return "%s %s, %s %s" % (dollars, dollar_unit, cents, cent_unit)
        elif dollars:
            dollar_unit = "dollar" if dollars == 1 else "dollars"
            return "%s %s" % (dollars, dollar_unit)
        elif cents:
            cent_unit = "cent" if cents == 1 else "cents"
            return "%s %s" % (cents, cent_unit)
        else:
            return "zero dollars"

    def _remove_commas(self, m: str) -> str:
        """
        This method is used to remove commas from sentences.
        """
        return m.group(1).replace(",", "")

    def _expand_decimal_point(self, m: str) -> str:
        """
        This method is used to expand '.' into spoken word ' point '.
        """
        return m.group(1).replace(".", " point ")

    def _expand_ordinal(self, num: str) -> str:
        """
        This method is used to expand ordinals such as '1st', '2nd' into spoken words.
        """
        ordinal_suffixes = {1: "st", 2: "nd", 3: "rd"}

        num = int(num.group(0)[:-2])
        if 10 <= num % 100 and num % 100 <= 20:
            suffix = "th"
        else:
            suffix = ordinal_suffixes.get(num % 10, "th")
        return self.number_to_words(num) + suffix

    def _expand_number(self, m: str) -> str:
        """
        This method acts as a preprocessing step for numbers between 1000 and 3000 (same as the original repository,
        link :
        https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/utils/tokenizer.py#L86)
        """
        num = int(m.group(0))

        if num > 1000 and num < 3000:
            if num == 2000:
                return "two thousand"
            elif num > 2000 and num < 2010:
                return "two thousand " + self.number_to_words(num % 100)
            elif num % 100 == 0:
                return self.number_to_words(num // 100) + " hundred"
            else:
                return self.number_to_words(num)
        else:
            return self.number_to_words(num)

    def normalize_numbers(self, text: str) -> str:
        """
        This method is used to normalize numbers within a text such as converting the numbers to words, removing
        commas, etc.
        """
        text = re.sub(re.compile(r"([0-9][0-9\,]+[0-9])"), self._remove_commas, text)
        text = re.sub(re.compile(r"£([0-9\,]*[0-9]+)"), r"\1 pounds", text)
        text = re.sub(re.compile(r"\$([0-9\.\,]*[0-9]+)"), self._expand_dollars, text)
        text = re.sub(re.compile(r"([0-9]+\.[0-9]+)"), self._expand_decimal_point, text)
        text = re.sub(re.compile(r"[0-9]+(st|nd|rd|th)"), self._expand_ordinal, text)
        text = re.sub(re.compile(r"[0-9]+"), self._expand_number, text)
        return text

    def expand_abbreviations(self, text: str) -> str:
        """
        Expands the abbreviated words.
        """
        for regex, replacement in self._abbreviations:
            text = re.sub(regex, replacement, text)
        return text

    def collapse_whitespace(self, text: str) -> str:
        """
        Removes multiple whitespaces
        """
        return re.sub(re.compile(r"\s+"), " ", text)

    def __call__(self, text):
        """
        Converts text to ascii, numbers / number-like quantities to their spelt-out counterparts and expands
        abbreviations
        """

        text = self.convert_to_ascii(text)
        text = text.lower()
        text = self.normalize_numbers(text)
        text = self.expand_abbreviations(text)
        text = self.collapse_whitespace(text)
        text = text.replace('"', "")

        return text
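A quick illustrative check of the normalizer (a sketch, not part of the diff). Tracing the rules above, the abbreviation table handles "Mr." and `number_to_words` spells out the digits, so the call should yield output along these lines.

```python
from transformers.models.clvp.number_normalizer import EnglishNormalizer

normalizer = EnglishNormalizer()

# lowercased, "mr." expanded via the abbreviation table, "45" spelled out
print(normalizer("Mr. Jones bought 45 apples"))
# expected, by the rules above: "mister jones bought forty-five apples"
```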
llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/processing_clvp.py
ADDED
@@ -0,0 +1,91 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Processor class for CLVP
"""


from ...processing_utils import ProcessorMixin


class ClvpProcessor(ProcessorMixin):
    r"""
    Constructs a CLVP processor which wraps a CLVP Feature Extractor and a CLVP Tokenizer into a single processor.

    [`ClvpProcessor`] offers all the functionalities of [`ClvpFeatureExtractor`] and [`ClvpTokenizer`]. See the
    [`~ClvpProcessor.__call__`], [`~ClvpProcessor.decode`] and [`~ClvpProcessor.batch_decode`] for more information.

    Args:
        feature_extractor (`ClvpFeatureExtractor`):
            An instance of [`ClvpFeatureExtractor`]. The feature extractor is a required input.
        tokenizer (`ClvpTokenizer`):
            An instance of [`ClvpTokenizer`]. The tokenizer is a required input.
    """

    feature_extractor_class = "ClvpFeatureExtractor"
    tokenizer_class = "ClvpTokenizer"
    model_input_names = [
        "input_ids",
        "input_features",
        "attention_mask",
    ]

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """
        Forwards the `audio` and `sampling_rate` arguments to [`~ClvpFeatureExtractor.__call__`] and the `text`
        argument to [`~ClvpTokenizer.__call__`]. Please refer to the docstring of the above two methods for more
        information.
        """

        raw_speech = kwargs.pop("raw_speech", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)

        if raw_speech is None and text is None:
            raise ValueError("You need to specify either a `raw_speech` or `text` input to process.")

        if raw_speech is not None:
            inputs = self.feature_extractor(raw_speech, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif raw_speech is None:
            return encodings
        else:
            inputs["input_ids"] = encodings["input_ids"]
            inputs["attention_mask"] = encodings["attention_mask"]
            return inputs

    # Copied from transformers.models.whisper.processing_whisper.WhisperProcessor.batch_decode with Whisper->Clvp
    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to ClvpTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    # Copied from transformers.models.whisper.processing_whisper.WhisperProcessor.decode with Whisper->Clvp
    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to ClvpTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)
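A short sketch (not part of the diff) of the three calling modes that `__call__` above supports. The one-second silent array is a hypothetical stand-in for real speech, just to make the sketch self-contained.

```python
import numpy as np

from transformers import ClvpProcessor

processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
audio = np.zeros(22050, dtype=np.float32)  # hypothetical stand-in for a real waveform

# text only -> tokenizer outputs (input_ids, attention_mask)
text_inputs = processor(text="This is an example text.", return_tensors="pt")

# audio only -> feature-extractor outputs (notably input_features)
audio_inputs = processor(raw_speech=audio, sampling_rate=22050, return_tensors="pt")

# both -> merged dict carrying input_ids, input_features and attention_mask,
# exactly as wired up in `__call__` above
both = processor(raw_speech=audio, sampling_rate=22050, text="This is an example text.", return_tensors="pt")
```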
llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/tokenization_clvp.py
ADDED
@@ -0,0 +1,364 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization class for CLVP."""

import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
from .number_normalizer import EnglishNormalizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}


@lru_cache()
# Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
    characters the bpe code barfs on.

    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
    if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
    decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
    tables between utf-8 bytes and unicode strings.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


# Copied from transformers.models.gpt2.tokenization_gpt2.get_pairs
def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class ClvpTokenizer(PreTrainedTokenizer):
    """
    Construct a CLVP tokenizer. Based on byte-level Byte-Pair-Encoding.

    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
    be encoded differently whether it is at the beginning of the sentence (without space) or not:

    ```python
    >>> from transformers import ClvpTokenizer

    >>> tokenizer = ClvpTokenizer.from_pretrained("susnato/clvp_dev")
    >>> tokenizer("Hello world")["input_ids"]
    [62, 84, 28, 2, 179, 79]

    >>> tokenizer(" Hello world")["input_ids"]
    [2, 62, 84, 28, 2, 179, 79]
    ```

    You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
    call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.

    <Tip>

    When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).

    </Tip>

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The beginning of sequence token.
        eos_token (`str`, *optional*, defaults to `"[STOP]"`):
            The end of sequence token.
        pad_token (`str`, *optional*, defaults to `"[STOP]"`):
            The pad token of the sequence.
        add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows to treat the leading word just as any
            other word. (The CLVP tokenizer detects the beginning of words by the preceding space.)
        add_bos_token (`bool`, *optional*, defaults to `False`):
            Whether to add `bos_token` in front of the sequence when add_special_tokens=True.
        add_eos_token (`bool`, *optional*, defaults to `False`):
            Whether to add `eos_token` at the end of the sequence when add_special_tokens=True.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = [
        "input_ids",
        "attention_mask",
    ]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        unk_token="[UNK]",
        bos_token="<|endoftext|>",
        eos_token="[STOP]",
        pad_token="[STOP]",
        add_prefix_space=False,
        add_bos_token=False,
        add_eos_token=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token

        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token
        self._normalizer = None

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

        super().__init__(
            errors=errors,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            add_bos_token=add_bos_token,
            add_eos_token=add_eos_token,
            **kwargs,
        )

    @property
    def vocab_size(self):
        return len(self.encoder)

    @property
    def normalizer(self):
        if self._normalizer is None:
            self._normalizer = EnglishNormalizer()
        return self._normalizer

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.build_inputs_with_special_tokens
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = bos_token_id + token_ids_0 + eos_token_id

        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id

        return output

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_special_tokens_mask
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if not self.add_bos_token:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=False
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        text = self.normalizer(text)
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)

            # if the token is "Ġ" we replace it with "[SPACE]" (if "[SPACE]" is present in the vocab), otherwise we keep the "Ġ".
            bpe_tokens.extend(
                "[SPACE]" if bpe_token == "\u0120" and "[SPACE]" in self.encoder.keys() else bpe_token
                for bpe_token in self.bpe(token).split(" ")
            )

        return bpe_tokens

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_id_to_token
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index)

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def clean_up_tokenization(self, text):
        text = "".join(text)
        vocab_tokens = list(self.encoder.keys()) + list(self.added_tokens_encoder.keys())

        text = text.replace("[SPACE]", " ") if "[SPACE]" in vocab_tokens else text
        text = text.replace("[STOP]", " ") if "[STOP]" in vocab_tokens else text

        text = text.replace(self.unk_token, "").replace("   ", " ").replace("  ", " ")
        return text

    # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
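As a sketch of the pieces above working together (not part of the diff): the normalizer runs inside `_tokenize`, spaces surface as `[SPACE]` tokens when that token is in the vocab, and `clean_up_tokenization` undoes the mapping on decode.

```python
from transformers import ClvpTokenizer

tokenizer = ClvpTokenizer.from_pretrained("susnato/clvp_dev")

# `_tokenize` normalizes first, so abbreviations and digits come out spelled out
tokens = tokenizer.tokenize("Dr. Smith has 2 dogs")

# per the class docstring above, "Hello world" encodes to [62, 84, 28, 2, 179, 79]
ids = tokenizer("Hello world")["input_ids"]

# decode maps "[SPACE]"/"[STOP]" back to plain spaces via clean_up_tokenization
print(tokenizer.decode(ids))
```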
llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__init__.py
ADDED
@@ -0,0 +1,61 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_dinov2": ["DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Dinov2Config", "Dinov2OnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dinov2"] = [
        "DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Dinov2ForImageClassification",
        "Dinov2Model",
        "Dinov2PreTrainedModel",
        "Dinov2Backbone",
    ]

if TYPE_CHECKING:
    from .configuration_dinov2 import DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Dinov2Config, Dinov2OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dinov2 import (
            DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Dinov2Backbone,
            Dinov2ForImageClassification,
            Dinov2Model,
            Dinov2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
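A brief sketch (not part of the diff) of what this lazy `__init__.py` provides: nothing heavy is imported until an attribute is first accessed, and the modeling classes are only reachable when `torch` is installed, since they sit behind the `is_torch_available()` gate above.

```python
# resolved lazily through the _LazyModule registered above
from transformers import Dinov2Config, Dinov2Model

config = Dinov2Config(image_size=518, patch_size=14)  # same values the conversion script below uses
model = Dinov2Model(config)  # randomly initialized; use from_pretrained() for released weights
```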
llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/convert_dinov2_to_hf.py
ADDED
@@ -0,0 +1,287 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert DINOv2 checkpoints from the original repository.
+
+URL: https://github.com/facebookresearch/dinov2/tree/main
+"""
+
+
+import argparse
+import json
+from pathlib import Path
+
+import requests
+import torch
+import torch.nn as nn
+from huggingface_hub import hf_hub_download
+from PIL import Image
+from torchvision import transforms
+
+from transformers import BitImageProcessor, Dinov2Config, Dinov2ForImageClassification, Dinov2Model
+from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+def get_dinov2_config(model_name, image_classifier=False):
+    config = Dinov2Config(image_size=518, patch_size=14)
+
+    # size of the architecture
+    if "vits" in model_name:
+        config.hidden_size = 384
+        config.num_attention_heads = 6
+    elif "vitb" in model_name:
+        pass
+    elif "vitl" in model_name:
+        config.hidden_size = 1024
+        config.num_hidden_layers = 24
+        config.num_attention_heads = 16
+    elif "vitg" in model_name:
+        config.use_swiglu_ffn = True
+        config.hidden_size = 1536
+        config.num_hidden_layers = 40
+        config.num_attention_heads = 24
+    else:
+        raise ValueError("Model not supported")
+
+    if image_classifier:
+        repo_id = "huggingface/label-files"
+        filename = "imagenet-1k-id2label.json"
+        config.num_labels = 1000
+        config.id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+        config.id2label = {int(k): v for k, v in config.id2label.items()}
+
+    return config
+
+
+def create_rename_keys(config):
+    rename_keys = []
+    # fmt: off
+
+    # patch embedding layer
+    rename_keys.append(("cls_token", "embeddings.cls_token"))
+    rename_keys.append(("mask_token", "embeddings.mask_token"))
+    rename_keys.append(("pos_embed", "embeddings.position_embeddings"))
+    rename_keys.append(("patch_embed.proj.weight", "embeddings.patch_embeddings.projection.weight"))
+    rename_keys.append(("patch_embed.proj.bias", "embeddings.patch_embeddings.projection.bias"))
+
+    for i in range(config.num_hidden_layers):
+        # layernorms
+        rename_keys.append((f"blocks.{i}.norm1.weight", f"encoder.layer.{i}.norm1.weight"))
+        rename_keys.append((f"blocks.{i}.norm1.bias", f"encoder.layer.{i}.norm1.bias"))
+        rename_keys.append((f"blocks.{i}.norm2.weight", f"encoder.layer.{i}.norm2.weight"))
+        rename_keys.append((f"blocks.{i}.norm2.bias", f"encoder.layer.{i}.norm2.bias"))
+        # MLP
+        if config.use_swiglu_ffn:
+            rename_keys.append((f"blocks.{i}.mlp.w12.weight", f"encoder.layer.{i}.mlp.w12.weight"))
+            rename_keys.append((f"blocks.{i}.mlp.w12.bias", f"encoder.layer.{i}.mlp.w12.bias"))
+            rename_keys.append((f"blocks.{i}.mlp.w3.weight", f"encoder.layer.{i}.mlp.w3.weight"))
+            rename_keys.append((f"blocks.{i}.mlp.w3.bias", f"encoder.layer.{i}.mlp.w3.bias"))
+        else:
+            rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"encoder.layer.{i}.mlp.fc1.weight"))
+            rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"encoder.layer.{i}.mlp.fc1.bias"))
+            rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"encoder.layer.{i}.mlp.fc2.weight"))
+            rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"encoder.layer.{i}.mlp.fc2.bias"))
+        # layerscale
+        rename_keys.append((f"blocks.{i}.ls1.gamma", f"encoder.layer.{i}.layer_scale1.lambda1"))
+        rename_keys.append((f"blocks.{i}.ls2.gamma", f"encoder.layer.{i}.layer_scale2.lambda1"))
+        # attention projection layer
+        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"encoder.layer.{i}.attention.output.dense.weight"))
+        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"encoder.layer.{i}.attention.output.dense.bias"))
+
+    # final layernorm
+    rename_keys.append(("norm.weight", "layernorm.weight"))
+    rename_keys.append(("norm.bias", "layernorm.bias"))
+
+    # fmt: on
+    return rename_keys
+
+
+def rename_key(dct, old, new):
+    val = dct.pop(old)
+    dct[new] = val
+
+
+# we split up the matrix of each encoder layer into queries, keys and values
+def read_in_q_k_v(state_dict, config):
+    for i in range(config.num_hidden_layers):
+        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
+        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
+        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
+        # next, add query, keys and values (in that order) to the state dict
+        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
+        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
+        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
+            config.hidden_size : config.hidden_size * 2, :
+        ]
+        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
+            config.hidden_size : config.hidden_size * 2
+        ]
+        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
+        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
+
+
+# We will verify our results on an image of cute cats
+def prepare_img():
+    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+    image = Image.open(requests.get(url, stream=True).raw)
+    return image
+
+
+@torch.no_grad()
+def convert_dinov2_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
+    """
+    Copy/paste/tweak model's weights to our DINOv2 structure.
+    """
+
+    # define default Dinov2 configuration
+    image_classifier = "1layer" in model_name
+    config = get_dinov2_config(model_name, image_classifier=image_classifier)
+
+    # load original model from torch hub
+    original_model = torch.hub.load("facebookresearch/dinov2", model_name.replace("_1layer", ""))
+    original_model.eval()
+
+    # load state_dict of original model, remove and rename some keys
+    state_dict = original_model.state_dict()
+    rename_keys = create_rename_keys(config)
+    for src, dest in rename_keys:
+        rename_key(state_dict, src, dest)
+    read_in_q_k_v(state_dict, config)
+
+    for key, val in state_dict.copy().items():
+        val = state_dict.pop(key)
+        if "w12" in key:
+            key = key.replace("w12", "weights_in")
+        if "w3" in key:
+            key = key.replace("w3", "weights_out")
+        state_dict[key] = val
+
+    # load HuggingFace model
+    if image_classifier:
+        model = Dinov2ForImageClassification(config).eval()
+        model.dinov2.load_state_dict(state_dict)
+        model_name_to_classifier_dict_url = {
+            "dinov2_vits14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_linear_head.pth",
+            "dinov2_vitb14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_linear_head.pth",
+            "dinov2_vitl14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_linear_head.pth",
+            "dinov2_vitg14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_linear_head.pth",
+        }
+        url = model_name_to_classifier_dict_url[model_name]
+        classifier_state_dict = torch.hub.load_state_dict_from_url(url, map_location="cpu")
+        model.classifier.weight = nn.Parameter(classifier_state_dict["weight"])
+        model.classifier.bias = nn.Parameter(classifier_state_dict["bias"])
+    else:
+        model = Dinov2Model(config).eval()
+        model.load_state_dict(state_dict)
+
+    # load image
+    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
+
+    # preprocess image
+    transformations = transforms.Compose(
+        [
+            transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC),
+            transforms.CenterCrop(224),
+            transforms.ToTensor(),
+            transforms.Normalize(
+                mean=IMAGENET_DEFAULT_MEAN,  # these are RGB mean+std values
+                std=IMAGENET_DEFAULT_STD,  # across a large photo dataset.
+            ),
+        ]
+    )
+
+    original_pixel_values = transformations(image).unsqueeze(0)  # insert batch dimension
+
+    processor = BitImageProcessor(
+        size={"shortest_edge": 256},
+        resample=PILImageResampling.BICUBIC,
+        image_mean=IMAGENET_DEFAULT_MEAN,
+        image_std=IMAGENET_DEFAULT_STD,
+    )
+    pixel_values = processor(image, return_tensors="pt").pixel_values
+
+    assert torch.allclose(original_pixel_values, pixel_values)
+
+    with torch.no_grad():
+        outputs = model(pixel_values, output_hidden_states=True)
+        original_outputs = original_model(pixel_values)
+
+    # assert values
+    if image_classifier:
+        print("Predicted class:")
+        class_idx = outputs.logits.argmax(-1).item()
+        print(model.config.id2label[class_idx])
+    else:
+        assert outputs.last_hidden_state[:, 0].shape == original_outputs.shape
+        assert torch.allclose(outputs.last_hidden_state[:, 0], original_outputs, atol=1e-3)
+    print("Looks ok!")
+
+    if pytorch_dump_folder_path is not None:
+        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
+        model.save_pretrained(pytorch_dump_folder_path)
+        print(f"Saving image processor to {pytorch_dump_folder_path}")
+        processor.save_pretrained(pytorch_dump_folder_path)
+
+    if push_to_hub:
+        model_name_to_hf_name = {
+            "dinov2_vits14": "dinov2-small",
+            "dinov2_vitb14": "dinov2-base",
+            "dinov2_vitl14": "dinov2-large",
+            "dinov2_vitg14": "dinov2-giant",
+            "dinov2_vits14_1layer": "dinov2-small-imagenet1k-1-layer",
+            "dinov2_vitb14_1layer": "dinov2-base-imagenet1k-1-layer",
+            "dinov2_vitl14_1layer": "dinov2-large-imagenet1k-1-layer",
+            "dinov2_vitg14_1layer": "dinov2-giant-imagenet1k-1-layer",
+        }
+
+        name = model_name_to_hf_name[model_name]
+        model.push_to_hub(f"facebook/{name}")
+        processor.push_to_hub(f"facebook/{name}")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    # Required parameters
+    parser.add_argument(
+        "--model_name",
+        default="dinov2_vitb14",
+        type=str,
+        choices=[
+            "dinov2_vits14",
+            "dinov2_vitb14",
+            "dinov2_vitl14",
+            "dinov2_vitg14",
+            "dinov2_vits14_1layer",
+            "dinov2_vitb14_1layer",
+            "dinov2_vitl14_1layer",
+            "dinov2_vitg14_1layer",
+        ],
+        help="Name of the model you'd like to convert.",
+    )
+    parser.add_argument(
+        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
+    )
+    parser.add_argument(
+        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+    )
+
+    args = parser.parse_args()
+    convert_dinov2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
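The heart of `read_in_q_k_v` above is slicing the fused timm-style `qkv` projection into separate query/key/value blocks of `hidden_size` rows each. A standalone toy demonstration of that slicing, using a small made-up hidden size rather than a real checkpoint:

# Standalone demo of the fused-qkv split performed by read_in_q_k_v above,
# with a toy hidden size instead of a real DINOv2 checkpoint.
import torch

hidden_size = 4
qkv_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
    3 * hidden_size, hidden_size
)

query = qkv_weight[:hidden_size, :]                 # first hidden_size rows
key = qkv_weight[hidden_size : 2 * hidden_size, :]  # middle block
value = qkv_weight[-hidden_size:, :]                # last block

# the three slices partition the fused matrix exactly, in q/k/v order
assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)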
llmeval-env/lib/python3.10/site-packages/transformers/models/dit/__init__.py
ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/transformers/models/dit/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (196 Bytes).

llmeval-env/lib/python3.10/site-packages/transformers/models/dit/__pycache__/convert_dit_unilm_to_pytorch.cpython-310.pyc
ADDED
Binary file (6.45 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/dit/convert_dit_unilm_to_pytorch.py
ADDED
@@ -0,0 +1,231 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert DiT checkpoints from the unilm repository."""
+
+
+import argparse
+import json
+from pathlib import Path
+
+import requests
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+
+from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
+from transformers.image_utils import PILImageResampling
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+# here we list all keys to be renamed (original name on the left, our name on the right)
+def create_rename_keys(config, has_lm_head=False, is_semantic=False):
+    prefix = "backbone." if is_semantic else ""
+
+    rename_keys = []
+    for i in range(config.num_hidden_layers):
+        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
+        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
+        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
+        rename_keys.append(
+            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
+        )
+        rename_keys.append(
+            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
+        )
+        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
+        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
+        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
+        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
+        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
+        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))
+
+    # projection layer + position embeddings
+    rename_keys.extend(
+        [
+            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
+            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
+            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
+            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
+        ]
+    )
+
+    if has_lm_head:
+        # mask token + layernorm
+        rename_keys.extend(
+            [
+                ("mask_token", "beit.embeddings.mask_token"),
+                ("norm.weight", "layernorm.weight"),
+                ("norm.bias", "layernorm.bias"),
+            ]
+        )
+    else:
+        # layernorm + classification head
+        rename_keys.extend(
+            [
+                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
+                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
+                ("head.weight", "classifier.weight"),
+                ("head.bias", "classifier.bias"),
+            ]
+        )
+
+    return rename_keys
+
+
+# we split up the matrix of each encoder layer into queries, keys and values
+def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
+    for i in range(config.num_hidden_layers):
+        prefix = "backbone." if is_semantic else ""
+        # queries, keys and values
+        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
+        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
+        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
+
+        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
+            : config.hidden_size, :
+        ]
+        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
+        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
+            config.hidden_size : config.hidden_size * 2, :
+        ]
+        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
+            -config.hidden_size :, :
+        ]
+        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
+
+        # gamma_1 and gamma_2
+        # we call them lambda because otherwise they are renamed when using .from_pretrained
+        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
+        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
+
+        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
+        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
+
+
+def rename_key(dct, old, new):
+    val = dct.pop(old)
+    dct[new] = val
+
+
+# We will verify our results on an image of cute cats
+def prepare_img():
+    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+    im = Image.open(requests.get(url, stream=True).raw)
+    return im
+
+
+@torch.no_grad()
+def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
+    """
+    Copy/paste/tweak model's weights to our BEiT structure.
+    """
+
+    # define default BEiT configuration
+    has_lm_head = False if "rvlcdip" in checkpoint_url else True
+    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
+
+    # size of the architecture
+    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
+        config.hidden_size = 1024
+        config.intermediate_size = 4096
+        config.num_hidden_layers = 24
+        config.num_attention_heads = 16
+
+    # labels
+    if "rvlcdip" in checkpoint_url:
+        config.num_labels = 16
+        repo_id = "huggingface/label-files"
+        filename = "rvlcdip-id2label.json"
+        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+        id2label = {int(k): v for k, v in id2label.items()}
+        config.id2label = id2label
+        config.label2id = {v: k for k, v in id2label.items()}
+
+    # load state_dict of original model, remove and rename some keys
+    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
+
+    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
+    for src, dest in rename_keys:
+        rename_key(state_dict, src, dest)
+    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
+
+    # load HuggingFace model
+    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
+    model.eval()
+    model.load_state_dict(state_dict)
+
+    # Check outputs on an image
+    image_processor = BeitImageProcessor(
+        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
+    )
+    image = prepare_img()
+
+    encoding = image_processor(images=image, return_tensors="pt")
+    pixel_values = encoding["pixel_values"]
+
+    outputs = model(pixel_values)
+    logits = outputs.logits
+
+    # verify logits
+    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
+    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
+
+    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+    print(f"Saving model to {pytorch_dump_folder_path}")
+    model.save_pretrained(pytorch_dump_folder_path)
+    print(f"Saving image processor to {pytorch_dump_folder_path}")
+    image_processor.save_pretrained(pytorch_dump_folder_path)
+
+    if push_to_hub:
+        if has_lm_head:
+            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
+        else:
+            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
+        image_processor.push_to_hub(
+            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
+            organization="nielsr",
+            commit_message="Add image processor",
+            use_temp_dir=True,
+        )
+        model.push_to_hub(
+            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
+            organization="nielsr",
+            commit_message="Add model",
+            use_temp_dir=True,
+        )
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "--checkpoint_url",
+        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
+        type=str,
+        help="URL to the original PyTorch checkpoint (.pth file).",
+    )
+    parser.add_argument(
+        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
+    )
+    parser.add_argument(
+        "--push_to_hub",
+        action="store_true",
+    )
+    args = parser.parse_args()
+    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
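Assuming the package layout shown in this diff, the DiT converter can also be driven from Python instead of the CLI. A sketch: the output directory name below is arbitrary, and running it downloads the checkpoint from the script's own default URL.

# Driving convert_dit_checkpoint from Python; "./dit-base-converted" is an
# arbitrary output folder, and the URL is the script's documented default.
from transformers.models.dit.convert_dit_unilm_to_pytorch import convert_dit_checkpoint

convert_dit_checkpoint(
    checkpoint_url="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
    pytorch_dump_folder_path="./dit-base-converted",
    push_to_hub=False,
)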
llmeval-env/lib/python3.10/site-packages/transformers/models/falcon/__init__.py
ADDED
@@ -0,0 +1,68 @@
+# coding=utf-8
+# Copyright 2023 the Falcon authors and HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    is_torch_available,
+)
+
+
+_import_structure = {
+    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
+}
+
+try:
+    if not is_torch_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_falcon"] = [
+        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "FalconForCausalLM",
+        "FalconModel",
+        "FalconPreTrainedModel",
+        "FalconForSequenceClassification",
+        "FalconForTokenClassification",
+        "FalconForQuestionAnswering",
+    ]
+
+
+if TYPE_CHECKING:
+    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_falcon import (
+            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
+            FalconForCausalLM,
+            FalconForQuestionAnswering,
+            FalconForSequenceClassification,
+            FalconForTokenClassification,
+            FalconModel,
+            FalconPreTrainedModel,
+        )
+
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llmeval-env/lib/python3.10/site-packages/transformers/models/falcon/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.07 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/falcon/__pycache__/configuration_falcon.cpython-310.pyc
ADDED
Binary file (8.2 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/falcon/__pycache__/convert_custom_code_checkpoint.cpython-310.pyc
ADDED
Binary file (2.07 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/falcon/__pycache__/modeling_falcon.cpython-310.pyc
ADDED
Binary file (44.5 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/falcon/configuration_falcon.py
ADDED
@@ -0,0 +1,201 @@
+# coding=utf-8
+# Copyright 2023 the Falcon authors and HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Falcon configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+class FalconConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`FalconModel`]. It is used to instantiate a Falcon
+    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+    defaults will yield a similar configuration to that of the
+    [tiiuae/falcon-7b](https://huggingface.co/tiiuae/falcon-7b) architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 65024):
+            Vocabulary size of the Falcon model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`FalconModel`]
+        hidden_size (`int`, *optional*, defaults to 4544):
+            Dimension of the hidden representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer decoder.
+        num_attention_heads (`int`, *optional*, defaults to 71):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
+            The epsilon used by the layer normalization layers.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether the model should return the last key/values attentions (not used by all models). Only relevant if
+            `config.is_decoder=True`.
+        hidden_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout probability for MLP layers.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout probability for attention layers.
+        num_kv_heads (`int`, *optional*):
+            Number of key-value heads to use per attention layer. If unset, defaults to the same value as
+            `num_attention_heads`.
+        alibi (`bool`, *optional*, defaults to `False`):
+            Whether to use ALiBi positional biases during self-attention.
+        new_decoder_architecture (`bool`, *optional*, defaults to `False`):
+            Whether to use the new (Falcon-40B) decoder architecture. If `True`, the `multi_query` and `parallel_attn`
+            arguments are ignored, as the new decoder always uses parallel attention.
+        multi_query (`bool`, *optional*, defaults to `True`):
+            Whether to use multi-query attention in the decoder. Ignored when `new_decoder_architecture` is `True`.
+        parallel_attn (`bool`, *optional*, defaults to `True`):
+            Whether to compute attention in parallel with the feedforward layer. If False, they are consecutive
+            instead, as in the original Transformer architecture. Ignored when `new_decoder_architecture` is `True`.
+        bias (`bool`, *optional*, defaults to `False`):
+            Whether to use bias on Linear layers.
+        max_position_embeddings (`int`, *optional*, defaults to 2048):
+            The maximum sequence length that this model might ever be used with, when `alibi` is `False`. Pretrained
+            Falcon models with RoPE support up to 2048 tokens.
+        rope_theta (`float`, *optional*, defaults to 10000.0):
+            The base period of the RoPE embeddings.
+        rope_scaling (`Dict`, *optional*):
+            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+            `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
+            these scaling strategies behave:
+            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
+            experimental feature, subject to breaking API changes in future versions.
+        bos_token_id (`int`, *optional*, defaults to 11):
+            The id of the "beginning-of-sequence" token.
+        eos_token_id (`int`, *optional*, defaults to 11):
+            The id of the "end-of-sequence" token.
+        ffn_hidden_size (`int`, *optional*):
+            The hidden size of the feedforward layer in the Transformer decoder.
+            defaults to 4x hidden dim
+        activation (`str`, *optional*, defaults to `"gelu"`):
+            The activation function used in the feedforward layer.
+
+    Example:
+
+    ```python
+    >>> from transformers import FalconModel, FalconConfig
+
+    >>> # Initializing a small (2-layer) Falcon configuration
+    >>> configuration = FalconConfig(num_hidden_layers=2)
+
+    >>> # Initializing a model from the small configuration
+    >>> model = FalconModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "falcon"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=65024,
+        hidden_size=4544,
+        num_hidden_layers=32,
+        num_attention_heads=71,
+        layer_norm_epsilon=1e-5,
+        initializer_range=0.02,
+        use_cache=True,
+        hidden_dropout=0.0,
+        attention_dropout=0.0,
+        num_kv_heads=None,
+        alibi=False,
+        new_decoder_architecture=False,
+        multi_query=True,
+        parallel_attn=True,
+        bias=False,
+        max_position_embeddings=2048,
+        rope_theta=10000.0,
+        rope_scaling=None,
+        bos_token_id=11,
+        eos_token_id=11,
+        ffn_hidden_size=None,
+        activation="gelu",
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        # Backward compatibility with n_embed kwarg
+        n_embed = kwargs.pop("n_embed", None)
+        self.hidden_size = hidden_size if n_embed is None else n_embed
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.layer_norm_epsilon = layer_norm_epsilon
+        self.initializer_range = initializer_range
+        self.use_cache = use_cache
+        self.hidden_dropout = hidden_dropout
+        self.attention_dropout = attention_dropout
+        self.bos_token_id = bos_token_id
+        self.eos_token_id = eos_token_id
+        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
+        self.alibi = alibi
+        self.new_decoder_architecture = new_decoder_architecture
+        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
+        self.parallel_attn = parallel_attn
+        self.bias = bias
+        self.max_position_embeddings = max_position_embeddings
+        self.rope_theta = rope_theta
+        self.rope_scaling = rope_scaling
+        self.activation = activation
+        if ffn_hidden_size is None:
+            self.ffn_hidden_size = hidden_size * 4
+        else:
+            self.ffn_hidden_size = ffn_hidden_size
+        self._rope_scaling_validation()
+
+        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+    @property
+    def head_dim(self):
+        return self.hidden_size // self.num_attention_heads
+
+    @property
+    def rotary(self):
+        return not self.alibi
+
+    def _rope_scaling_validation(self):
+        """
+        Validate the `rope_scaling` configuration.
+        """
+        if self.rope_scaling is None:
+            return
+
+        if self.alibi:
+            raise ValueError("`rope_scaling` is not supported when `alibi` is `True`.")
+
+        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+            raise ValueError(
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
+            )
+        rope_scaling_type = self.rope_scaling.get("type", None)
+        rope_scaling_factor = self.rope_scaling.get("factor", None)
+        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+            raise ValueError(
+                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+            )
+        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
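To illustrate `_rope_scaling_validation` above, here is a short sketch showing one accepted and one rejected configuration; the values are illustrative only, not a recommended setup.

# Exercising FalconConfig's rope_scaling validation.
from transformers import FalconConfig

# accepted: a known strategy with a float factor > 1
config = FalconConfig(rope_scaling={"type": "dynamic", "factor": 2.0})

# rejected: rope_scaling cannot be combined with ALiBi position biases
try:
    FalconConfig(alibi=True, rope_scaling={"type": "linear", "factor": 2.0})
except ValueError as err:
    print(err)  # "`rope_scaling` is not supported when `alibi` is `True`."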
llmeval-env/lib/python3.10/site-packages/transformers/models/falcon/convert_custom_code_checkpoint.py
ADDED
@@ -0,0 +1,74 @@
+import json
+from argparse import ArgumentParser
+from pathlib import Path
+
+
+"""
+This script converts Falcon custom code checkpoints to modern Falcon checkpoints that use code in the Transformers
+library. After conversion, performance (especially for generation) should improve and the checkpoint can be loaded
+without needing trust_remote_code=True.
+"""
+
+if __name__ == "__main__":
+    parser = ArgumentParser()
+    parser.add_argument(
+        "--checkpoint_dir",
+        type=Path,
+        required=True,
+        help="Directory containing a custom code checkpoint to convert to a modern Falcon checkpoint.",
+    )
+    args = parser.parse_args()
+
+    if not args.checkpoint_dir.is_dir():
+        raise ValueError("--checkpoint_dir argument should be a directory!")
+
+    if (
+        not (args.checkpoint_dir / "configuration_RW.py").is_file()
+        or not (args.checkpoint_dir / "modelling_RW.py").is_file()
+    ):
+        raise ValueError(
+            "The model directory should contain configuration_RW.py and modelling_RW.py files! Are you sure this is a custom code checkpoint?"
+        )
+    (args.checkpoint_dir / "configuration_RW.py").unlink()
+    (args.checkpoint_dir / "modelling_RW.py").unlink()
+
+    config = args.checkpoint_dir / "config.json"
+    text = config.read_text()
+    text = text.replace("RWForCausalLM", "FalconForCausalLM")
+    text = text.replace("RefinedWebModel", "falcon")
+    text = text.replace("RefinedWeb", "falcon")
+    json_config = json.loads(text)
+    del json_config["auto_map"]
+
+    if "n_head" in json_config:
+        json_config["num_attention_heads"] = json_config.pop("n_head")
+    if "n_layer" in json_config:
+        json_config["num_hidden_layers"] = json_config.pop("n_layer")
+    if "n_head_kv" in json_config:
+        json_config["num_kv_heads"] = json_config.pop("n_head_kv")
+        json_config["new_decoder_architecture"] = True
+    else:
+        json_config["new_decoder_architecture"] = False
+    bos_token_id = json_config.get("bos_token_id", 1)
+    eos_token_id = json_config.get("eos_token_id", 2)
+    config.unlink()
+    config.write_text(json.dumps(json_config, indent=2, sort_keys=True))
+
+    tokenizer_config = args.checkpoint_dir / "tokenizer_config.json"
+    if tokenizer_config.is_file():
+        text = tokenizer_config.read_text()
+        json_config = json.loads(text)
+        if json_config["tokenizer_class"] == "PreTrainedTokenizerFast":
+            json_config["model_input_names"] = ["input_ids", "attention_mask"]
+            tokenizer_config.unlink()
+            tokenizer_config.write_text(json.dumps(json_config, indent=2, sort_keys=True))
+
+    generation_config_path = args.checkpoint_dir / "generation_config.json"
+    generation_dict = {
+        "_from_model_config": True,
+        "bos_token_id": bos_token_id,
+        "eos_token_id": eos_token_id,
+        "transformers_version": "4.33.0.dev0",
+    }
+    generation_config_path.write_text(json.dumps(generation_dict, indent=2, sort_keys=True))
+    print("Done! Please double-check that the new checkpoint works as expected.")
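The key-renaming step above is easy to dry-run on an in-memory dict before touching a real checkpoint directory; here is a toy reproduction of that logic with made-up values.

# Toy dry-run of the config-key renaming performed by the script above.
json_config = {"n_head": 71, "n_layer": 32, "n_head_kv": 1, "auto_map": {}}

del json_config["auto_map"]
if "n_head" in json_config:
    json_config["num_attention_heads"] = json_config.pop("n_head")
if "n_layer" in json_config:
    json_config["num_hidden_layers"] = json_config.pop("n_layer")
if "n_head_kv" in json_config:
    json_config["num_kv_heads"] = json_config.pop("n_head_kv")
    json_config["new_decoder_architecture"] = True  # n_head_kv implies the 40B-style decoder

print(json_config)
# {'num_attention_heads': 71, 'num_hidden_layers': 32, 'num_kv_heads': 1, 'new_decoder_architecture': True}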
llmeval-env/lib/python3.10/site-packages/transformers/models/lilt/__init__.py
ADDED
@@ -0,0 +1,60 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
+}
+
+try:
+    if not is_torch_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_lilt"] = [
+        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "LiltForQuestionAnswering",
+        "LiltForSequenceClassification",
+        "LiltForTokenClassification",
+        "LiltModel",
+        "LiltPreTrainedModel",
+    ]
+
+if TYPE_CHECKING:
+    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_lilt import (
+            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
+            LiltForQuestionAnswering,
+            LiltForSequenceClassification,
+            LiltForTokenClassification,
+            LiltModel,
+            LiltPreTrainedModel,
+        )
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llmeval-env/lib/python3.10/site-packages/transformers/models/lilt/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (983 Bytes).

llmeval-env/lib/python3.10/site-packages/transformers/models/lilt/__pycache__/configuration_lilt.cpython-310.pyc
ADDED
Binary file (5.98 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/lilt/__pycache__/modeling_lilt.cpython-310.pyc
ADDED
Binary file (34.5 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/lilt/configuration_lilt.py
ADDED
@@ -0,0 +1,131 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" LiLT configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+class LiltConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`LiltModel`]. It is used to instantiate a LiLT
+    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+    defaults will yield a similar configuration to that of the LiLT
+    [SCUT-DLVCLab/lilt-roberta-en-base](https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base) architecture.
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 30522):
+            Vocabulary size of the LiLT model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`LiltModel`].
+        hidden_size (`int`, *optional*, defaults to 768):
+            Dimensionality of the encoder layers and the pooler layer. Should be a multiple of 24.
+        num_hidden_layers (`int`, *optional*, defaults to 12):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 12):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        intermediate_size (`int`, *optional*, defaults to 3072):
+            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+            `"relu"`, `"silu"` and `"gelu_new"` are supported.
+        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+            The dropout ratio for the attention probabilities.
+        max_position_embeddings (`int`, *optional*, defaults to 512):
+            The maximum sequence length that this model might ever be used with. Typically set this to something large
+            just in case (e.g., 512 or 1024 or 2048).
+        type_vocab_size (`int`, *optional*, defaults to 2):
+            The vocabulary size of the `token_type_ids` passed when calling [`LiltModel`].
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+            The epsilon used by the layer normalization layers.
+        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+            [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+            with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+        classifier_dropout (`float`, *optional*):
+            The dropout ratio for the classification head.
+        channel_shrink_ratio (`int`, *optional*, defaults to 4):
+            The shrink ratio compared to the `hidden_size` for the channel dimension of the layout embeddings.
+        max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
+            The maximum value that the 2D position embedding might ever be used with. Typically set this to something
+            large just in case (e.g., 1024).
+
+    Examples:
+
+    ```python
+    >>> from transformers import LiltConfig, LiltModel
+
+    >>> # Initializing a LiLT SCUT-DLVCLab/lilt-roberta-en-base style configuration
+    >>> configuration = LiltConfig()
+    >>> # Randomly initializing a model from the SCUT-DLVCLab/lilt-roberta-en-base style configuration
+    >>> model = LiltModel(configuration)
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "lilt"
+
+    def __init__(
+        self,
+        vocab_size=30522,
+        hidden_size=768,
+        num_hidden_layers=12,
+        num_attention_heads=12,
+        intermediate_size=3072,
+        hidden_act="gelu",
+        hidden_dropout_prob=0.1,
+        attention_probs_dropout_prob=0.1,
+        max_position_embeddings=512,
+        type_vocab_size=2,
+        initializer_range=0.02,
+        layer_norm_eps=1e-12,
+        pad_token_id=0,
+        position_embedding_type="absolute",
+        classifier_dropout=None,
+        channel_shrink_ratio=4,
+        max_2d_position_embeddings=1024,
+        **kwargs,
+    ):
+        super().__init__(pad_token_id=pad_token_id, **kwargs)
+
+        self.vocab_size = vocab_size
+        self.hidden_size = hidden_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.hidden_act = hidden_act
+        self.intermediate_size = intermediate_size
+        self.hidden_dropout_prob = hidden_dropout_prob
+        self.attention_probs_dropout_prob = attention_probs_dropout_prob
+        self.max_position_embeddings = max_position_embeddings
+        self.type_vocab_size = type_vocab_size
+        self.initializer_range = initializer_range
+        self.layer_norm_eps = layer_norm_eps
+        self.position_embedding_type = position_embedding_type
+        self.classifier_dropout = classifier_dropout
+        self.channel_shrink_ratio = channel_shrink_ratio
+        self.max_2d_position_embeddings = max_2d_position_embeddings
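As a quick check of the `channel_shrink_ratio` description above: a sketch deriving the width of the layout stream from a config instance. The division here mirrors the docstring's "shrink ratio compared to the `hidden_size`"; the exact use of these fields inside the modeling code is an assumption.

# Relating channel_shrink_ratio to the layout-stream width (illustrative).
from transformers import LiltConfig

config = LiltConfig(hidden_size=768, channel_shrink_ratio=4)
layout_width = config.hidden_size // config.channel_shrink_ratio
print(layout_width)  # 192 channels for the layout embeddings, per the docstring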
llmeval-env/lib/python3.10/site-packages/transformers/models/lilt/modeling_lilt.py
ADDED
@@ -0,0 +1,1186 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch LiLT model."""

import math
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPooling,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_lilt import LiltConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "LiltConfig"


from ..deprecated._archive_maps import LILT_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402

class LiltTextEmbeddings(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")

        # End copy
        self.padding_idx = config.pad_token_id
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
        )

    def forward(
        self,
        input_ids=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
    ):
        if position_ids is None:
            if input_ids is not None:
                # Create the position ids from the input token ids. Any padded tokens remain padded.
                position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx).to(
                    input_ids.device
                )
            else:
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)

        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings, position_ids

    def create_position_ids_from_input_ids(self, input_ids, padding_idx):
        """
        Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
        symbols are ignored. This is modified from fairseq's `utils.make_positions`.

        Args:
            input_ids: torch.Tensor

        Returns: torch.Tensor
        """
        # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
        mask = input_ids.ne(padding_idx).int()
        incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask)) * mask
        return incremental_indices.long() + padding_idx

    def create_position_ids_from_inputs_embeds(self, inputs_embeds):
        """
        We are provided embeddings directly. We cannot infer which are padded, so we just generate sequential
        position ids.

        Args:
            inputs_embeds: torch.Tensor

        Returns: torch.Tensor
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]

        position_ids = torch.arange(
            self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
        )
        return position_ids.unsqueeze(0).expand(input_shape)

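The `create_position_ids_from_input_ids` helper above is easy to sanity-check in isolation. A standalone sketch reproducing its arithmetic (the `padding_idx=1` value mirrors the RoBERTa-style tokenizers the LiLT checkpoints use; adjust for your vocabulary):

```python
import torch

padding_idx = 1  # assumed RoBERTa-style pad token id
input_ids = torch.tensor([[5, 6, 7, 1, 1]])  # two trailing pad tokens

# Same cumsum trick as create_position_ids_from_input_ids above.
mask = input_ids.ne(padding_idx).int()
position_ids = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx

print(position_ids)  # tensor([[2, 3, 4, 1, 1]]): real tokens count up from padding_idx + 1, pads stay put
```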

class LiltLayoutEmbeddings(nn.Module):
    def __init__(self, config):
        super().__init__()
        # we divide the hidden_size by 6 here as there are 6 different layout embeddings,
        # namely left_position, upper_position, right_position, lower_position, height, width
        self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6)
        self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6)
        self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6)
        self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6)

        self.padding_idx = config.pad_token_id
        self.box_position_embeddings = nn.Embedding(
            config.max_position_embeddings,
            config.hidden_size // config.channel_shrink_ratio,
            padding_idx=self.padding_idx,
        )
        self.box_linear_embeddings = nn.Linear(
            in_features=config.hidden_size, out_features=config.hidden_size // config.channel_shrink_ratio
        )
        self.LayerNorm = nn.LayerNorm(config.hidden_size // config.channel_shrink_ratio, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, bbox=None, position_ids=None):
        try:
            left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
            upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
            right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
            lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
        except IndexError as e:
            raise IndexError("The `bbox` coordinate values should be within 0-1000 range.") from e

        h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
        w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])

        spatial_position_embeddings = torch.cat(
            [
                left_position_embeddings,
                upper_position_embeddings,
                right_position_embeddings,
                lower_position_embeddings,
                h_position_embeddings,
                w_position_embeddings,
            ],
            dim=-1,
        )
        spatial_position_embeddings = self.box_linear_embeddings(spatial_position_embeddings)
        box_position_embeddings = self.box_position_embeddings(position_ids)

        spatial_position_embeddings = spatial_position_embeddings + box_position_embeddings

        spatial_position_embeddings = self.LayerNorm(spatial_position_embeddings)
        spatial_position_embeddings = self.dropout(spatial_position_embeddings)

        return spatial_position_embeddings

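To see the shapes flowing through the layout embedding above, here is a pure-`torch` sketch of the same six-way concatenation and projection (the sizes are assumed base defaults, and the random boxes are kept ordered so widths and heights stay non-negative):

```python
import torch
from torch import nn

hidden_size, shrink, max_2d = 768, 4, 1024  # assumed base-size defaults
x_emb = nn.Embedding(max_2d, hidden_size // 6)
y_emb = nn.Embedding(max_2d, hidden_size // 6)
h_emb = nn.Embedding(max_2d, hidden_size // 6)
w_emb = nn.Embedding(max_2d, hidden_size // 6)
proj = nn.Linear(hidden_size, hidden_size // shrink)

# Random but well-formed boxes: x1 >= x0 and y1 >= y0, all coordinates in [0, 1000).
x0, y0 = torch.randint(0, 500, (2, 16)), torch.randint(0, 500, (2, 16))
x1, y1 = x0 + torch.randint(0, 500, (2, 16)), y0 + torch.randint(0, 500, (2, 16))
bbox = torch.stack([x0, y0, x1, y1], dim=-1)  # (batch, seq, 4)

parts = [
    x_emb(bbox[..., 0]), y_emb(bbox[..., 1]),  # left, upper
    x_emb(bbox[..., 2]), y_emb(bbox[..., 3]),  # right, lower
    h_emb(bbox[..., 3] - bbox[..., 1]),        # height
    w_emb(bbox[..., 2] - bbox[..., 0]),        # width
]
spatial = proj(torch.cat(parts, dim=-1))
print(spatial.shape)  # torch.Size([2, 16, 192]) == hidden_size // channel_shrink_ratio
```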
class LiltSelfAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.layout_query = nn.Linear(
            config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio
        )
        self.layout_key = nn.Linear(
            config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio
        )
        self.layout_value = nn.Linear(
            config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio
        )

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

        self.channel_shrink_ratio = config.channel_shrink_ratio

    def transpose_for_scores(self, x, r=1):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size // r)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        layout_inputs,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
    ):
        layout_value_layer = self.transpose_for_scores(self.layout_value(layout_inputs), r=self.channel_shrink_ratio)
        layout_key_layer = self.transpose_for_scores(self.layout_key(layout_inputs), r=self.channel_shrink_ratio)
        layout_query_layer = self.transpose_for_scores(self.layout_query(layout_inputs), r=self.channel_shrink_ratio)

        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        layout_attention_scores = torch.matmul(layout_query_layer, layout_key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        tmp_attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        tmp_layout_attention_scores = layout_attention_scores / math.sqrt(
            self.attention_head_size // self.channel_shrink_ratio
        )
        attention_scores = tmp_attention_scores + tmp_layout_attention_scores
        layout_attention_scores = tmp_layout_attention_scores + tmp_attention_scores

        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the LiltModel forward() function)
            layout_attention_scores = layout_attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        layout_attention_probs = nn.Softmax(dim=-1)(layout_attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        layout_attention_probs = self.dropout(layout_attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            layout_attention_probs = layout_attention_probs * head_mask

        layout_context_layer = torch.matmul(layout_attention_probs, layout_value_layer)

        layout_context_layer = layout_context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = layout_context_layer.size()[:-2] + (self.all_head_size // self.channel_shrink_ratio,)
        layout_context_layer = layout_context_layer.view(*new_context_layer_shape)

        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the LiltModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (
            ((context_layer, layout_context_layer), attention_probs)
            if output_attentions
            else ((context_layer, layout_context_layer),)
        )

        return outputs

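The distinctive step in the attention above is the symmetric exchange of scores between the two streams: each stream's scaled logits are added to the other's before softmax. A minimal sketch of just that mixing with random tensors (head sizes are assumed base defaults):

```python
import math
import torch

batch, heads, seq = 2, 12, 16
head_dim, shrink = 64, 4  # assumed base-size defaults

text_scores = torch.randn(batch, heads, seq, seq)    # q @ k.T of the text stream
layout_scores = torch.randn(batch, heads, seq, seq)  # q @ k.T of the layout stream

# Scale each stream by its own head width, then sum the two maps into both streams.
text_scaled = text_scores / math.sqrt(head_dim)
layout_scaled = layout_scores / math.sqrt(head_dim // shrink)
text_mixed = text_scaled + layout_scaled
layout_mixed = layout_scaled + text_scaled

# Outside of dropout, both streams therefore share one attention distribution.
print(torch.allclose(text_mixed.softmax(-1), layout_mixed.softmax(-1)))  # True
```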
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class LiltSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class LiltAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        self.self = LiltSelfAttention(config, position_embedding_type=position_embedding_type)
        self.output = LiltSelfOutput(config)
        self.pruned_heads = set()

        ori_hidden_size = config.hidden_size
        config.hidden_size = config.hidden_size // config.channel_shrink_ratio
        self.layout_output = LiltSelfOutput(config)
        config.hidden_size = ori_hidden_size

    # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads
    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        layout_inputs: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(
            hidden_states,
            layout_inputs,
            attention_mask,
            head_mask,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0][0], hidden_states)
        layout_attention_output = self.layout_output(self_outputs[0][1], layout_inputs)
        outputs = ((attention_output, layout_attention_output),) + self_outputs[1:]  # add attentions if we output them
        return outputs


# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class LiltIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertOutput
class LiltOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class LiltLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = LiltAttention(config)
        self.intermediate = LiltIntermediate(config)
        self.output = LiltOutput(config)

        ori_hidden_size = config.hidden_size
        ori_intermediate_size = config.intermediate_size
        config.hidden_size = config.hidden_size // config.channel_shrink_ratio
        config.intermediate_size = config.intermediate_size // config.channel_shrink_ratio
        self.layout_intermediate = LiltIntermediate(config)
        self.layout_output = LiltOutput(config)
        config.hidden_size = ori_hidden_size
        config.intermediate_size = ori_intermediate_size

    def forward(
        self,
        hidden_states: torch.Tensor,
        layout_inputs: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_attention_outputs = self.attention(
            hidden_states,
            layout_inputs,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0][0]
        layout_attention_output = self_attention_outputs[0][1]

        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        layout_layer_output = apply_chunking_to_forward(
            self.layout_feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, layout_attention_output
        )
        outputs = ((layer_output, layout_layer_output),) + outputs

        return outputs

    # Copied from transformers.models.bert.modeling_bert.BertLayer.feed_forward_chunk
    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output

    def layout_feed_forward_chunk(self, attention_output):
        intermediate_output = self.layout_intermediate(attention_output)
        layer_output = self.layout_output(intermediate_output, attention_output)
        return layer_output

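Both feed-forward branches above run through `apply_chunking_to_forward`, which caps activation memory by slicing the sequence dimension. A simplified re-implementation of the idea (not the library's exact code) shows why chunking is exact for token-wise feed-forwards:

```python
import torch

def chunked_forward(forward_fn, chunk_size, chunk_dim, tensor):
    # Simplified take on transformers' apply_chunking_to_forward:
    # run the feed-forward on slices and concatenate, trading speed for memory.
    if chunk_size == 0:
        return forward_fn(tensor)
    chunks = tensor.split(chunk_size, dim=chunk_dim)
    return torch.cat([forward_fn(c) for c in chunks], dim=chunk_dim)

ff = torch.nn.Sequential(torch.nn.Linear(8, 32), torch.nn.GELU(), torch.nn.Linear(32, 8))
x = torch.randn(2, 10, 8)  # (batch, seq, hidden)

full = ff(x)
chunked = chunked_forward(ff, chunk_size=4, chunk_dim=1, tensor=x)
print(torch.allclose(full, chunked, atol=1e-6))  # True: the op never mixes positions
```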
class LiltEncoder(nn.Module):
    # Copied from transformers.models.bert.modeling_bert.BertEncoder.__init__ with Bert->Lilt
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([LiltLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        layout_inputs: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    layout_inputs,
                    attention_mask,
                    layer_head_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    layout_inputs,
                    attention_mask,
                    layer_head_mask,
                    output_attentions,
                )

            hidden_states = layer_outputs[0][0]
            layout_inputs = layer_outputs[0][1]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    all_hidden_states,
                    all_self_attentions,
                ]
                if v is not None
            )
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


# Copied from transformers.models.bert.modeling_bert.BertPooler
class LiltPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class LiltPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = LiltConfig
    base_model_prefix = "lilt"
    supports_gradient_checkpointing = True
    _no_split_modules = []

    # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


LILT_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`LiltConfig`]): Model configuration class with all the parameters of the
            model. Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

LILT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)

        bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*):
            Bounding boxes of each input sequence token. Selected in the range `[0,
            config.max_2d_position_embeddings - 1]`. Each bounding box should be a normalized version in (x0, y0, x1,
            y1) format, where (x0, y0) corresponds to the position of the upper left corner of the bounding box, and
            (x1, y1) represents the position of the lower right corner. See [Overview](#Overview) for normalization.

        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare LiLT Model transformer outputting raw hidden-states without any specific head on top.",
    LILT_START_DOCSTRING,
)
class LiltModel(LiltPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = LiltTextEmbeddings(config)
        self.layout_embeddings = LiltLayoutEmbeddings(config)
        self.encoder = LiltEncoder(config)

        self.pooler = LiltPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See
        base class PreTrainedModel.
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(LILT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        bbox: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, AutoModel
        >>> from datasets import load_dataset

        >>> tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
        >>> model = AutoModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")

        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]

        >>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")

        >>> outputs = model(**encoding)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if bbox is None:
            bbox = torch.zeros(input_shape + (4,), dtype=torch.long, device=device)

        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length), device=device)

        if token_type_ids is None:
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output, position_ids = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
        )

        layout_embedding_output = self.layout_embeddings(bbox=bbox, position_ids=position_ids)

        encoder_outputs = self.encoder(
            embedding_output,
            layout_embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@add_start_docstrings(
    """
    LiLT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
    output) e.g. for GLUE tasks.
    """,
    LILT_START_DOCSTRING,
)
class LiltForSequenceClassification(LiltPreTrainedModel):
    # Copied from transformers.models.roberta.modeling_roberta.RobertaForSequenceClassification.__init__ with Roberta->Lilt, roberta->lilt
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.lilt = LiltModel(config, add_pooling_layer=False)
        self.classifier = LiltClassificationHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(LILT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        bbox: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification
        >>> from datasets import load_dataset

        >>> tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
        >>> model = AutoModelForSequenceClassification.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")

        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]

        >>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")

        >>> outputs = model(**encoding)
        >>> predicted_class_idx = outputs.logits.argmax(-1).item()
        >>> predicted_class = model.config.id2label[predicted_class_idx]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.lilt(
            input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    Lilt Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    LILT_START_DOCSTRING,
)
class LiltForTokenClassification(LiltPreTrainedModel):
    # Copied from transformers.models.roberta.modeling_roberta.RobertaForTokenClassification.__init__ with Roberta->Lilt, roberta->lilt
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.lilt = LiltModel(config, add_pooling_layer=False)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(LILT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        bbox: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, AutoModelForTokenClassification
        >>> from datasets import load_dataset

        >>> tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
        >>> model = AutoModelForTokenClassification.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")

        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]

        >>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")

        >>> outputs = model(**encoding)
        >>> predicted_class_indices = outputs.logits.argmax(-1)
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.lilt(
            input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(logits.device)
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


# Copied from transformers.models.roberta.modeling_roberta.RobertaClassificationHead with Roberta->Lilt
class LiltClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, features, **kwargs):
        x = features[:, 0, :]  # take <s> token (equiv. to [CLS])
        x = self.dropout(x)
        x = self.dense(x)
        x = torch.tanh(x)
        x = self.dropout(x)
        x = self.out_proj(x)
        return x


@add_start_docstrings(
    """
    Lilt Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    LILT_START_DOCSTRING,
)
class LiltForQuestionAnswering(LiltPreTrainedModel):
    # Copied from transformers.models.roberta.modeling_roberta.RobertaForQuestionAnswering.__init__ with Roberta->Lilt, roberta->lilt
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.lilt = LiltModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(LILT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        bbox: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, AutoModelForQuestionAnswering
        >>> from datasets import load_dataset

        >>> tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
        >>> model = AutoModelForQuestionAnswering.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")

        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]

        >>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")

        >>> outputs = model(**encoding)

        >>> answer_start_index = outputs.start_logits.argmax()
        >>> answer_end_index = outputs.end_logits.argmax()

        >>> predict_answer_tokens = encoding.input_ids[0, answer_start_index : answer_end_index + 1]
        >>> predicted_answer = tokenizer.decode(predict_answer_tokens)
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.lilt(
            input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the gathered labels gain an extra dimension; squeeze it away
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )