Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/__init__.py +97 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/configuration_convnextv2.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/convert_convnextv2_to_pytorch.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/modeling_convnextv2.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/modeling_tf_convnextv2.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/configuration_convnextv2.py +117 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/convert_convnextv2_to_pytorch.py +286 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_convnextv2.py +574 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_tf_convnextv2.py +681 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/convert_dinov2_depth_to_hf.py +384 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ernie/__init__.py +70 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/configuration_ernie.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/modeling_ernie.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ernie/configuration_ernie.py +162 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/ernie/modeling_ernie.py +1820 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py +144 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +1504 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox_japanese/__init__.py +62 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox_japanese/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox_japanese/__pycache__/configuration_gpt_neox_japanese.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox_japanese/__pycache__/modeling_gpt_neox_japanese.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox_japanese/__pycache__/tokenization_gpt_neox_japanese.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py +120 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py +729 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py +368 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/__init__.py +110 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/configuration_mobilevit.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/convert_mlcvnets_to_pytorch.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/feature_extraction_mobilevit.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/image_processing_mobilevit.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/modeling_mobilevit.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/modeling_tf_mobilevit.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/configuration_mobilevit.py +172 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py +312 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/feature_extraction_mobilevit.py +33 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/image_processing_mobilevit.py +493 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/modeling_mobilevit.py +1066 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/modeling_tf_mobilevit.py +1373 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/seggpt/__init__.py +71 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/configuration_seggpt.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/convert_seggpt_to_hf.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/image_processing_seggpt.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/modeling_seggpt.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/seggpt/configuration_seggpt.py +144 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/seggpt/convert_seggpt_to_hf.py +222 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/seggpt/image_processing_seggpt.py +626 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/__init__.py
ADDED
@@ -0,0 +1,97 @@
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.

# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
    is_tf_available,
)


_import_structure = {
    "configuration_convnextv2": [
        "CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConvNextV2Config",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnextv2"] = [
        "CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextV2ForImageClassification",
        "ConvNextV2Model",
        "ConvNextV2PreTrainedModel",
        "ConvNextV2Backbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnextv2"] = [
        "TFConvNextV2ForImageClassification",
        "TFConvNextV2Model",
        "TFConvNextV2PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_convnextv2 import (
        CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConvNextV2Config,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnextv2 import (
            CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextV2Backbone,
            ConvNextV2ForImageClassification,
            ConvNextV2Model,
            ConvNextV2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnextv2 import (
            TFConvNextV2ForImageClassification,
            TFConvNextV2Model,
            TFConvNextV2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
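
For context, a minimal sketch of how this lazy import structure behaves at runtime (illustrative, not part of the added file; it assumes `transformers` and `torch` are installed in the environment):

from transformers.models import convnextv2

# Accessing an attribute makes _LazyModule import the relevant submodule on demand,
# so `import transformers` stays cheap even though torch and TF models are registered above.
config = convnextv2.ConvNextV2Config()      # imports configuration_convnextv2 lazily
model = convnextv2.ConvNextV2Model(config)  # imports modeling_convnextv2 lazily (requires torch)
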
llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.29 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/configuration_convnextv2.cpython-310.pyc
ADDED
Binary file (5.08 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/convert_convnextv2_to_pytorch.cpython-310.pyc
ADDED
Binary file (9.14 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/modeling_convnextv2.cpython-310.pyc
ADDED
Binary file (18.7 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/modeling_tf_convnextv2.cpython-310.pyc
ADDED
Binary file (22.2 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/configuration_convnextv2.py
ADDED
@@ -0,0 +1,117 @@
# coding=utf-8
# Copyright 2023 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ConvNeXTV2 model configuration"""


from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ConvNextV2Model`]. It is used to instantiate an
    ConvNeXTV2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the ConvNeXTV2
    [facebook/convnextv2-tiny-1k-224](https://huggingface.co/facebook/convnextv2-tiny-1k-224) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        patch_size (`int`, optional, defaults to 4):
            Patch size to use in the patch embedding layer.
        num_stages (`int`, optional, defaults to 4):
            The number of stages in the model.
        hidden_sizes (`List[int]`, *optional*, defaults to `[96, 192, 384, 768]`):
            Dimensionality (hidden size) at each stage.
        depths (`List[int]`, *optional*, defaults to `[3, 3, 9, 3]`):
            Depth (number of blocks) for each stage.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"gelu_new"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            The drop rate for stochastic depth.
        out_features (`List[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`List[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.

    Example:
    ```python
    >>> from transformers import ConvNeXTV2Config, ConvNextV2Model

    >>> # Initializing a ConvNeXTV2 convnextv2-tiny-1k-224 style configuration
    >>> configuration = ConvNeXTV2Config()

    >>> # Initializing a model (with random weights) from the convnextv2-tiny-1k-224 style configuration
    >>> model = ConvNextV2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
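
The `out_features` and `out_indices` arguments documented above only matter when the configuration is used through the backbone API. A short sketch (illustrative, not part of the added file):

from transformers import ConvNextV2Config

# Ask the backbone to expose the stem plus the last two stages;
# get_aligned_output_features_output_indices fills in the matching indices so both views stay consistent.
config = ConvNextV2Config(out_features=["stem", "stage3", "stage4"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4'] with the default depths
print(config.out_features)  # ['stem', 'stage3', 'stage4']
print(config.out_indices)   # should correspond to [0, 3, 4]
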
llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/convert_convnextv2_to_pytorch.py
ADDED
@@ -0,0 +1,286 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ConvNeXTV2 checkpoints from the original repository.

URL: https://github.com/facebookresearch/ConvNeXt"""

import argparse
import json
import os

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ConvNextImageProcessor, ConvNextV2Config, ConvNextV2ForImageClassification
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_convnextv2_config(checkpoint_url):
    config = ConvNextV2Config()

    if "atto" in checkpoint_url:
        depths = [2, 2, 6, 2]
        hidden_sizes = [40, 80, 160, 320]
    if "femto" in checkpoint_url:
        depths = [2, 2, 6, 2]
        hidden_sizes = [48, 96, 192, 384]
    if "pico" in checkpoint_url:
        depths = [2, 2, 6, 2]
        hidden_sizes = [64, 128, 256, 512]
    if "nano" in checkpoint_url:
        depths = [2, 2, 8, 2]
        hidden_sizes = [80, 160, 320, 640]
    if "tiny" in checkpoint_url:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
    if "large" in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
    if "huge" in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [352, 704, 1408, 2816]

    num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    repo_id = "huggingface/label-files"
    config.num_labels = num_labels
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.hidden_sizes = hidden_sizes
    config.depths = depths

    return config, expected_shape


def rename_key(name):
    if "downsample_layers.0.0" in name:
        name = name.replace("downsample_layers.0.0", "embeddings.patch_embeddings")
    if "downsample_layers.0.1" in name:
        name = name.replace("downsample_layers.0.1", "embeddings.norm")  # we rename to layernorm later on
    if "downsample_layers.1.0" in name:
        name = name.replace("downsample_layers.1.0", "stages.1.downsampling_layer.0")
    if "downsample_layers.1.1" in name:
        name = name.replace("downsample_layers.1.1", "stages.1.downsampling_layer.1")
    if "downsample_layers.2.0" in name:
        name = name.replace("downsample_layers.2.0", "stages.2.downsampling_layer.0")
    if "downsample_layers.2.1" in name:
        name = name.replace("downsample_layers.2.1", "stages.2.downsampling_layer.1")
    if "downsample_layers.3.0" in name:
        name = name.replace("downsample_layers.3.0", "stages.3.downsampling_layer.0")
    if "downsample_layers.3.1" in name:
        name = name.replace("downsample_layers.3.1", "stages.3.downsampling_layer.1")
    if "stages" in name and "downsampling_layer" not in name:
        # stages.0.0. for instance should be renamed to stages.0.layers.0.
        name = name[: len("stages.0")] + ".layers" + name[len("stages.0") :]
    if "gamma" in name:
        name = name.replace("gamma", "weight")
    if "beta" in name:
        name = name.replace("beta", "bias")
    if "stages" in name:
        name = name.replace("stages", "encoder.stages")
    if "norm" in name:
        name = name.replace("norm", "layernorm")
    if "head" in name:
        name = name.replace("head", "classifier")

    return name


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_preprocessor(checkpoint_url):
    if "224" in checkpoint_url:
        size = 224
        crop_pct = 224 / 256
    elif "384" in checkpoint_url:
        size = 384
        crop_pct = None
    else:
        size = 512
        crop_pct = None

    return ConvNextImageProcessor(
        size=size,
        crop_pct=crop_pct,
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.229, 0.224, 0.225],
        resample=PILImageResampling.BICUBIC,
    )


@torch.no_grad()
def convert_convnextv2_checkpoint(checkpoint_url, pytorch_dump_folder_path, save_model, push_to_hub):
    """
    Copy/paste/tweak model's weights to our ConvNeXTV2 structure.
    """
    print("Downloading original model from checkpoint...")
    # define ConvNeXTV2 configuration based on URL
    config, expected_shape = get_convnextv2_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    print("Converting model parameters...")
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # add prefix to all keys expect classifier head
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if not key.startswith("classifier"):
            key = "convnextv2." + key
        state_dict[key] = val

    # load HuggingFace model
    model = ConvNextV2ForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image, prepared by ConvNextImageProcessor
    preprocessor = convert_preprocessor(checkpoint_url)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")
    logits = model(**inputs).logits

    # note: the logits below were obtained without center cropping
    if checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt":
        expected_logits = torch.tensor([-0.3930, 0.1747, -0.5246, 0.4177, 0.4295])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_femto_1k_224_ema.pt":
        expected_logits = torch.tensor([-0.1727, -0.5341, -0.7818, -0.4745, -0.6566])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_pico_1k_224_ema.pt":
        expected_logits = torch.tensor([-0.0333, 0.1563, -0.9137, 0.1054, 0.0381])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_nano_1k_224_ema.pt":
        expected_logits = torch.tensor([-0.1744, -0.1555, -0.0713, 0.0950, -0.1431])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_tiny_1k_224_ema.pt":
        expected_logits = torch.tensor([0.9996, 0.1966, -0.4386, -0.3472, 0.6661])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_base_1k_224_ema.pt":
        expected_logits = torch.tensor([-0.2553, -0.6708, -0.1359, 0.2518, -0.2488])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_large_1k_224_ema.pt":
        expected_logits = torch.tensor([-0.0673, -0.5627, -0.3753, -0.2722, 0.0178])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_huge_1k_224_ema.pt":
        expected_logits = torch.tensor([-0.6377, -0.7458, -0.2150, 0.1184, -0.0597])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_224_ema.pt":
        expected_logits = torch.tensor([1.0799, 0.2322, -0.8860, 1.0219, 0.6231])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_384_ema.pt":
        expected_logits = torch.tensor([0.3766, 0.4917, -1.1426, 0.9942, 0.6024])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_224_ema.pt":
        expected_logits = torch.tensor([0.4220, -0.6919, -0.4317, -0.2881, -0.6609])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_384_ema.pt":
        expected_logits = torch.tensor([0.1082, -0.8286, -0.5095, 0.4681, -0.8085])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_224_ema.pt":
        expected_logits = torch.tensor([-0.2419, -0.6221, 0.2176, -0.0980, -0.7527])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_384_ema.pt":
        expected_logits = torch.tensor([0.0391, -0.4371, 0.3786, 0.1251, -0.2784])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_224_ema.pt":
        expected_logits = torch.tensor([-0.0504, 0.5636, -0.1729, -0.6507, -0.3949])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_384_ema.pt":
        expected_logits = torch.tensor([0.3560, 0.9486, 0.3149, -0.2667, -0.5138])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_384_ema.pt":
        expected_logits = torch.tensor([-0.2469, -0.4550, -0.5853, -0.0810, 0.0309])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_512_ema.pt":
        expected_logits = torch.tensor([-0.3090, 0.0802, -0.0682, -0.1979, -0.2826])
    else:
        raise ValueError(f"Unknown URL: {checkpoint_url}")

    assert torch.allclose(logits[0, :5], expected_logits, atol=1e-3)
    assert logits.shape == expected_shape
    print("Model outputs match the original results!")

    if save_model:
        print("Saving model to local...")
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)

        model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    model_name = "convnextv2"
    if "atto" in checkpoint_url:
        model_name += "-atto"
    if "femto" in checkpoint_url:
        model_name += "-femto"
    if "pico" in checkpoint_url:
        model_name += "-pico"
    if "nano" in checkpoint_url:
        model_name += "-nano"
    elif "tiny" in checkpoint_url:
        model_name += "-tiny"
    elif "base" in checkpoint_url:
        model_name += "-base"
    elif "large" in checkpoint_url:
        model_name += "-large"
    elif "huge" in checkpoint_url:
        model_name += "-huge"
    if "22k" in checkpoint_url and "1k" not in checkpoint_url:
        model_name += "-22k"
    elif "22k" in checkpoint_url and "1k" in checkpoint_url:
        model_name += "-22k-1k"
    elif "1k" in checkpoint_url:
        model_name += "-1k"
    if "224" in checkpoint_url:
        model_name += "-224"
    elif "384" in checkpoint_url:
        model_name += "-384"
    elif "512" in checkpoint_url:
        model_name += "-512"

    if push_to_hub:
        print(f"Pushing {model_name} to the hub...")
        model.push_to_hub(model_name)
        preprocessor.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt",
        type=str,
        help="URL of the original ConvNeXTV2 checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image preprocessor to the hub")

    args = parser.parse_args()
    convert_convnextv2_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub
    )
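
For reference, a sketch of how the conversion entry point defined above is typically driven (illustrative, not part of the added file); it is equivalent to running the script with its default `--checkpoint_url` plus `--save_model`:

from transformers.models.convnextv2.convert_convnextv2_to_pytorch import convert_convnextv2_checkpoint

convert_convnextv2_checkpoint(
    checkpoint_url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt",
    pytorch_dump_folder_path="model",   # matches the script's default output directory
    save_model=True,                    # write model and preprocessor with save_pretrained
    push_to_hub=False,
)
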
llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_convnextv2.py
ADDED
@@ -0,0 +1,574 @@
# coding=utf-8
# Copyright 2023 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch ConvNextV2 model."""


from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_convnextv2 import ConvNextV2Config


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ConvNextV2Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/convnextv2-tiny-1k-224"
_EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/convnextv2-tiny-1k-224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


from ..deprecated._archive_maps import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->ConvNextV2
class ConvNextV2DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class ConvNextV2GRN(nn.Module):
    """GRN (Global Response Normalization) layer"""

    def __init__(self, dim: int):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(1, 1, 1, dim))
        self.bias = nn.Parameter(torch.zeros(1, 1, 1, dim))

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        # Compute and normalize global spatial feature maps
        global_features = torch.norm(hidden_states, p=2, dim=(1, 2), keepdim=True)
        norm_features = global_features / (global_features.mean(dim=-1, keepdim=True) + 1e-6)
        hidden_states = self.weight * (hidden_states * norm_features) + self.bias + hidden_states

        return hidden_states

+
# Copied from transformers.models.convnext.modeling_convnext.ConvNextLayerNorm with ConvNext->ConvNextV2
|
115 |
+
class ConvNextV2LayerNorm(nn.Module):
|
116 |
+
r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
|
117 |
+
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
|
118 |
+
width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
|
119 |
+
"""
|
120 |
+
|
121 |
+
def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
|
122 |
+
super().__init__()
|
123 |
+
self.weight = nn.Parameter(torch.ones(normalized_shape))
|
124 |
+
self.bias = nn.Parameter(torch.zeros(normalized_shape))
|
125 |
+
self.eps = eps
|
126 |
+
self.data_format = data_format
|
127 |
+
if self.data_format not in ["channels_last", "channels_first"]:
|
128 |
+
raise NotImplementedError(f"Unsupported data format: {self.data_format}")
|
129 |
+
self.normalized_shape = (normalized_shape,)
|
130 |
+
|
131 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
132 |
+
if self.data_format == "channels_last":
|
133 |
+
x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
|
134 |
+
elif self.data_format == "channels_first":
|
135 |
+
input_dtype = x.dtype
|
136 |
+
x = x.float()
|
137 |
+
u = x.mean(1, keepdim=True)
|
138 |
+
s = (x - u).pow(2).mean(1, keepdim=True)
|
139 |
+
x = (x - u) / torch.sqrt(s + self.eps)
|
140 |
+
x = x.to(dtype=input_dtype)
|
141 |
+
x = self.weight[:, None, None] * x + self.bias[:, None, None]
|
142 |
+
return x
|
143 |
+
|
144 |
+
|
145 |
+
# Copied from transformers.models.convnext.modeling_convnext.ConvNextEmbeddings with ConvNext->ConvNextV2
|
146 |
+
class ConvNextV2Embeddings(nn.Module):
|
147 |
+
"""This class is comparable to (and inspired by) the SwinEmbeddings class
|
148 |
+
found in src/transformers/models/swin/modeling_swin.py.
|
149 |
+
"""
|
150 |
+
|
151 |
+
def __init__(self, config):
|
152 |
+
super().__init__()
|
153 |
+
self.patch_embeddings = nn.Conv2d(
|
154 |
+
config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size
|
155 |
+
)
|
156 |
+
self.layernorm = ConvNextV2LayerNorm(config.hidden_sizes[0], eps=1e-6, data_format="channels_first")
|
157 |
+
self.num_channels = config.num_channels
|
158 |
+
|
159 |
+
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
|
160 |
+
num_channels = pixel_values.shape[1]
|
161 |
+
if num_channels != self.num_channels:
|
162 |
+
raise ValueError(
|
163 |
+
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
|
164 |
+
)
|
165 |
+
embeddings = self.patch_embeddings(pixel_values)
|
166 |
+
embeddings = self.layernorm(embeddings)
|
167 |
+
return embeddings
|
168 |
+
|
169 |
+
|
170 |
+
class ConvNextV2Layer(nn.Module):
|
171 |
+
"""This corresponds to the `Block` class in the original implementation.
|
172 |
+
|
173 |
+
There are two equivalent implementations: [DwConv, LayerNorm (channels_first), Conv, GELU,1x1 Conv]; all in (N, C,
|
174 |
+
H, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back
|
175 |
+
|
176 |
+
The authors used (2) as they find it slightly faster in PyTorch.
|
177 |
+
|
178 |
+
Args:
|
179 |
+
config ([`ConvNextV2Config`]): Model configuration class.
|
180 |
+
dim (`int`): Number of input channels.
|
181 |
+
drop_path (`float`): Stochastic depth rate. Default: 0.0.
|
182 |
+
"""
|
183 |
+
|
184 |
+
def __init__(self, config, dim, drop_path=0):
|
185 |
+
super().__init__()
|
186 |
+
# depthwise conv
|
187 |
+
self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
|
188 |
+
self.layernorm = ConvNextV2LayerNorm(dim, eps=1e-6)
|
189 |
+
# pointwise/1x1 convs, implemented with linear layers
|
190 |
+
self.pwconv1 = nn.Linear(dim, 4 * dim)
|
191 |
+
self.act = ACT2FN[config.hidden_act]
|
192 |
+
self.grn = ConvNextV2GRN(4 * dim)
|
193 |
+
self.pwconv2 = nn.Linear(4 * dim, dim)
|
194 |
+
self.drop_path = ConvNextV2DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
|
195 |
+
|
196 |
+
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
|
197 |
+
input = hidden_states
|
198 |
+
x = self.dwconv(hidden_states)
|
199 |
+
# (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels)
|
200 |
+
x = x.permute(0, 2, 3, 1)
|
201 |
+
x = self.layernorm(x)
|
202 |
+
x = self.pwconv1(x)
|
203 |
+
x = self.act(x)
|
204 |
+
x = self.grn(x)
|
205 |
+
x = self.pwconv2(x)
|
206 |
+
# (batch_size, height, width, num_channels) -> (batch_size, num_channels, height, width)
|
207 |
+
x = x.permute(0, 3, 1, 2)
|
208 |
+
|
209 |
+
x = input + self.drop_path(x)
|
210 |
+
return x
|
211 |
+
|
212 |
+
|
213 |
+
# Copied from transformers.models.convnext.modeling_convnext.ConvNextStage with ConvNeXT->ConvNeXTV2, ConvNext->ConvNextV2
|
214 |
+
class ConvNextV2Stage(nn.Module):
|
215 |
+
"""ConvNeXTV2 stage, consisting of an optional downsampling layer + multiple residual blocks.
|
216 |
+
|
217 |
+
Args:
|
218 |
+
config ([`ConvNextV2Config`]): Model configuration class.
|
219 |
+
in_channels (`int`): Number of input channels.
|
220 |
+
out_channels (`int`): Number of output channels.
|
221 |
+
depth (`int`): Number of residual blocks.
|
222 |
+
drop_path_rates(`List[float]`): Stochastic depth rates for each layer.
|
223 |
+
"""
|
224 |
+
|
225 |
+
def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):
|
226 |
+
super().__init__()
|
227 |
+
|
228 |
+
if in_channels != out_channels or stride > 1:
|
229 |
+
self.downsampling_layer = nn.Sequential(
|
230 |
+
ConvNextV2LayerNorm(in_channels, eps=1e-6, data_format="channels_first"),
|
231 |
+
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride),
|
232 |
+
)
|
233 |
+
else:
|
234 |
+
self.downsampling_layer = nn.Identity()
|
235 |
+
drop_path_rates = drop_path_rates or [0.0] * depth
|
236 |
+
self.layers = nn.Sequential(
|
237 |
+
*[ConvNextV2Layer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)]
|
238 |
+
)
|
239 |
+
|
240 |
+
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
|
241 |
+
hidden_states = self.downsampling_layer(hidden_states)
|
242 |
+
hidden_states = self.layers(hidden_states)
|
243 |
+
return hidden_states
|
244 |
+
|
245 |
+
|
246 |
+
# Copied from transformers.models.convnext.modeling_convnext.ConvNextEncoder with ConvNext->ConvNextV2
|
247 |
+
class ConvNextV2Encoder(nn.Module):
|
248 |
+
def __init__(self, config):
|
249 |
+
super().__init__()
|
250 |
+
self.stages = nn.ModuleList()
|
251 |
+
drop_path_rates = [
|
252 |
+
x.tolist() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths)).split(config.depths)
|
253 |
+
]
|
254 |
+
prev_chs = config.hidden_sizes[0]
|
255 |
+
for i in range(config.num_stages):
|
256 |
+
out_chs = config.hidden_sizes[i]
|
257 |
+
stage = ConvNextV2Stage(
|
258 |
+
config,
|
259 |
+
in_channels=prev_chs,
|
260 |
+
out_channels=out_chs,
|
261 |
+
stride=2 if i > 0 else 1,
|
262 |
+
depth=config.depths[i],
|
263 |
+
drop_path_rates=drop_path_rates[i],
|
264 |
+
)
|
265 |
+
self.stages.append(stage)
|
266 |
+
prev_chs = out_chs
|
267 |
+
|
268 |
+
def forward(
|
269 |
+
self,
|
270 |
+
hidden_states: torch.FloatTensor,
|
271 |
+
output_hidden_states: Optional[bool] = False,
|
272 |
+
return_dict: Optional[bool] = True,
|
273 |
+
) -> Union[Tuple, BaseModelOutputWithNoAttention]:
|
274 |
+
all_hidden_states = () if output_hidden_states else None
|
275 |
+
|
276 |
+
for i, layer_module in enumerate(self.stages):
|
277 |
+
if output_hidden_states:
|
278 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
279 |
+
|
280 |
+
hidden_states = layer_module(hidden_states)
|
281 |
+
|
282 |
+
if output_hidden_states:
|
283 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
284 |
+
|
285 |
+
if not return_dict:
|
286 |
+
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
|
287 |
+
|
288 |
+
return BaseModelOutputWithNoAttention(
|
289 |
+
last_hidden_state=hidden_states,
|
290 |
+
hidden_states=all_hidden_states,
|
291 |
+
)
|
292 |
+
|
293 |
+
|
294 |
+
# Copied from transformers.models.convnext.modeling_convnext.ConvNextPreTrainedModel with ConvNext->ConvNextV2, convnext->convnextv2
|
295 |
+
class ConvNextV2PreTrainedModel(PreTrainedModel):
|
296 |
+
"""
|
297 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
298 |
+
models.
|
299 |
+
"""
|
300 |
+
|
301 |
+
config_class = ConvNextV2Config
|
302 |
+
base_model_prefix = "convnextv2"
|
303 |
+
main_input_name = "pixel_values"
|
304 |
+
|
305 |
+
def _init_weights(self, module):
|
306 |
+
"""Initialize the weights"""
|
307 |
+
if isinstance(module, (nn.Linear, nn.Conv2d)):
|
308 |
+
# Slightly different from the TF version which uses truncated_normal for initialization
|
309 |
+
# cf https://github.com/pytorch/pytorch/pull/5617
|
310 |
+
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
311 |
+
if module.bias is not None:
|
312 |
+
module.bias.data.zero_()
|
313 |
+
elif isinstance(module, nn.LayerNorm):
|
314 |
+
module.bias.data.zero_()
|
315 |
+
module.weight.data.fill_(1.0)
|
316 |
+
|
317 |
+
|
318 |
+
CONVNEXTV2_START_DOCSTRING = r"""
|
319 |
+
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
|
320 |
+
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
|
321 |
+
behavior.
|
322 |
+
|
323 |
+
Parameters:
|
324 |
+
config ([`ConvNextV2Config`]): Model configuration class with all the parameters of the model.
|
325 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
326 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
327 |
+
"""
|
328 |
+
|
329 |
+
CONVNEXTV2_INPUTS_DOCSTRING = r"""
|
330 |
+
Args:
|
331 |
+
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
|
332 |
+
Pixel values. Pixel values can be obtained using [`ConvNextImageProcessor`]. See
|
333 |
+
[`ConvNextImageProcessor.__call__`] for details.
|
334 |
+
output_hidden_states (`bool`, *optional*):
|
335 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
336 |
+
more detail.
|
337 |
+
return_dict (`bool`, *optional*):
|
338 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
339 |
+
"""
|
340 |
+
|
341 |
+
|
342 |
+
@add_start_docstrings(
|
343 |
+
"The bare ConvNextV2 model outputting raw features without any specific head on top.",
|
344 |
+
CONVNEXTV2_START_DOCSTRING,
|
345 |
+
)
|
346 |
+
# Copied from transformers.models.convnext.modeling_convnext.ConvNextModel with CONVNEXT->CONVNEXTV2, ConvNext->ConvNextV2
|
347 |
+
class ConvNextV2Model(ConvNextV2PreTrainedModel):
|
348 |
+
def __init__(self, config):
|
349 |
+
super().__init__(config)
|
350 |
+
self.config = config
|
351 |
+
|
352 |
+
self.embeddings = ConvNextV2Embeddings(config)
|
353 |
+
self.encoder = ConvNextV2Encoder(config)
|
354 |
+
|
355 |
+
# final layernorm layer
|
356 |
+
self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
|
357 |
+
|
358 |
+
# Initialize weights and apply final processing
|
359 |
+
self.post_init()
|
360 |
+
|
361 |
+
@add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
|
362 |
+
@add_code_sample_docstrings(
|
363 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
364 |
+
output_type=BaseModelOutputWithPoolingAndNoAttention,
|
365 |
+
config_class=_CONFIG_FOR_DOC,
|
366 |
+
modality="vision",
|
367 |
+
expected_output=_EXPECTED_OUTPUT_SHAPE,
|
368 |
+
)
|
369 |
+
def forward(
|
370 |
+
self,
|
371 |
+
pixel_values: torch.FloatTensor = None,
|
372 |
+
output_hidden_states: Optional[bool] = None,
|
373 |
+
return_dict: Optional[bool] = None,
|
374 |
+
) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
|
375 |
+
output_hidden_states = (
|
376 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
377 |
+
)
|
378 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
379 |
+
|
380 |
+
if pixel_values is None:
|
381 |
+
raise ValueError("You have to specify pixel_values")
|
382 |
+
|
383 |
+
embedding_output = self.embeddings(pixel_values)
|
384 |
+
|
385 |
+
encoder_outputs = self.encoder(
|
386 |
+
embedding_output,
|
387 |
+
output_hidden_states=output_hidden_states,
|
388 |
+
return_dict=return_dict,
|
389 |
+
)
|
390 |
+
|
391 |
+
last_hidden_state = encoder_outputs[0]
|
392 |
+
|
393 |
+
# global average pooling, (N, C, H, W) -> (N, C)
|
394 |
+
pooled_output = self.layernorm(last_hidden_state.mean([-2, -1]))
|
395 |
+
|
396 |
+
if not return_dict:
|
397 |
+
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
|
398 |
+
|
399 |
+
return BaseModelOutputWithPoolingAndNoAttention(
|
400 |
+
last_hidden_state=last_hidden_state,
|
401 |
+
pooler_output=pooled_output,
|
402 |
+
hidden_states=encoder_outputs.hidden_states,
|
403 |
+
)
|
404 |
+
|
405 |
+
|
406 |
+
@add_start_docstrings(
|
407 |
+
"""
|
408 |
+
ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
|
409 |
+
ImageNet.
|
410 |
+
""",
|
411 |
+
CONVNEXTV2_START_DOCSTRING,
|
412 |
+
)
|
413 |
+
# Copied from transformers.models.convnext.modeling_convnext.ConvNextForImageClassification with CONVNEXT->CONVNEXTV2,ConvNext->ConvNextV2,convnext->convnextv2
|
414 |
+
class ConvNextV2ForImageClassification(ConvNextV2PreTrainedModel):
|
415 |
+
def __init__(self, config):
|
416 |
+
super().__init__(config)
|
417 |
+
|
418 |
+
self.num_labels = config.num_labels
|
419 |
+
self.convnextv2 = ConvNextV2Model(config)
|
420 |
+
|
421 |
+
# Classifier head
|
422 |
+
self.classifier = (
|
423 |
+
nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
|
424 |
+
)
|
425 |
+
|
426 |
+
# Initialize weights and apply final processing
|
427 |
+
self.post_init()
|
428 |
+
|
429 |
+
@add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
|
430 |
+
@add_code_sample_docstrings(
|
431 |
+
checkpoint=_IMAGE_CLASS_CHECKPOINT,
|
432 |
+
output_type=ImageClassifierOutputWithNoAttention,
|
433 |
+
config_class=_CONFIG_FOR_DOC,
|
434 |
+
expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
|
435 |
+
)
|
436 |
+
def forward(
|
437 |
+
self,
|
438 |
+
pixel_values: torch.FloatTensor = None,
|
439 |
+
labels: Optional[torch.LongTensor] = None,
|
440 |
+
output_hidden_states: Optional[bool] = None,
|
441 |
+
return_dict: Optional[bool] = None,
|
442 |
+
) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
|
443 |
+
r"""
|
444 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
445 |
+
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
|
446 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
447 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
448 |
+
"""
|
449 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
450 |
+
|
451 |
+
outputs = self.convnextv2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
|
452 |
+
|
453 |
+
pooled_output = outputs.pooler_output if return_dict else outputs[1]
|
454 |
+
|
455 |
+
logits = self.classifier(pooled_output)
|
456 |
+
|
457 |
+
loss = None
|
458 |
+
if labels is not None:
|
459 |
+
if self.config.problem_type is None:
|
460 |
+
if self.num_labels == 1:
|
461 |
+
self.config.problem_type = "regression"
|
462 |
+
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
|
463 |
+
self.config.problem_type = "single_label_classification"
|
464 |
+
else:
|
465 |
+
self.config.problem_type = "multi_label_classification"
|
466 |
+
|
467 |
+
if self.config.problem_type == "regression":
|
468 |
+
loss_fct = MSELoss()
|
469 |
+
if self.num_labels == 1:
|
470 |
+
loss = loss_fct(logits.squeeze(), labels.squeeze())
|
471 |
+
else:
|
472 |
+
loss = loss_fct(logits, labels)
|
473 |
+
elif self.config.problem_type == "single_label_classification":
|
474 |
+
loss_fct = CrossEntropyLoss()
|
475 |
+
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
|
476 |
+
elif self.config.problem_type == "multi_label_classification":
|
477 |
+
loss_fct = BCEWithLogitsLoss()
|
478 |
+
loss = loss_fct(logits, labels)
|
479 |
+
if not return_dict:
|
480 |
+
output = (logits,) + outputs[2:]
|
481 |
+
return ((loss,) + output) if loss is not None else output
|
482 |
+
|
483 |
+
return ImageClassifierOutputWithNoAttention(
|
484 |
+
loss=loss,
|
485 |
+
logits=logits,
|
486 |
+
hidden_states=outputs.hidden_states,
|
487 |
+
)
|
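A note on the loss dispatch above: `config.problem_type` is inferred lazily from `num_labels` and the label dtype on the first labelled batch. A minimal standalone sketch of that decision logic (the tensor shapes and values below are illustrative, not taken from the model):

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss


def infer_problem_type(num_labels: int, labels: torch.Tensor) -> str:
    # Mirrors the dispatch above: one label -> regression, integer labels -> single-label
    # classification, anything else (e.g. float multi-hot targets) -> multi-label.
    if num_labels == 1:
        return "regression"
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"
    return "multi_label_classification"


logits = torch.randn(4, 3)            # (batch_size, num_labels)
labels = torch.tensor([0, 2, 1, 1])   # integer class ids -> single-label classification
problem_type = infer_problem_type(num_labels=3, labels=labels)

loss_fct = {
    "regression": MSELoss(),
    "single_label_classification": CrossEntropyLoss(),
    "multi_label_classification": BCEWithLogitsLoss(),
}[problem_type]
loss = loss_fct(logits.view(-1, 3), labels.view(-1))
print(problem_type, float(loss))
```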
488 |
+
|
489 |
+
|
490 |
+
@add_start_docstrings(
|
491 |
+
"""
|
492 |
+
ConvNeXT V2 backbone, to be used with frameworks like DETR and MaskFormer.
|
493 |
+
""",
|
494 |
+
CONVNEXTV2_START_DOCSTRING,
|
495 |
+
)
|
496 |
+
# Copied from transformers.models.convnext.modeling_convnext.ConvNextBackbone with CONVNEXT->CONVNEXTV2,ConvNext->ConvNextV2,facebook/convnext-tiny-224->facebook/convnextv2-tiny-1k-224
|
497 |
+
class ConvNextV2Backbone(ConvNextV2PreTrainedModel, BackboneMixin):
|
498 |
+
def __init__(self, config):
|
499 |
+
super().__init__(config)
|
500 |
+
super()._init_backbone(config)
|
501 |
+
|
502 |
+
self.embeddings = ConvNextV2Embeddings(config)
|
503 |
+
self.encoder = ConvNextV2Encoder(config)
|
504 |
+
self.num_features = [config.hidden_sizes[0]] + config.hidden_sizes
|
505 |
+
|
506 |
+
# Add layer norms to hidden states of out_features
|
507 |
+
hidden_states_norms = {}
|
508 |
+
for stage, num_channels in zip(self._out_features, self.channels):
|
509 |
+
hidden_states_norms[stage] = ConvNextV2LayerNorm(num_channels, data_format="channels_first")
|
510 |
+
self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
|
511 |
+
|
512 |
+
# initialize weights and apply final processing
|
513 |
+
self.post_init()
|
514 |
+
|
515 |
+
@add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
|
516 |
+
@replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
|
517 |
+
def forward(
|
518 |
+
self,
|
519 |
+
pixel_values: torch.Tensor,
|
520 |
+
output_hidden_states: Optional[bool] = None,
|
521 |
+
return_dict: Optional[bool] = None,
|
522 |
+
) -> BackboneOutput:
|
523 |
+
"""
|
524 |
+
Returns:
|
525 |
+
|
526 |
+
Examples:
|
527 |
+
|
528 |
+
```python
|
529 |
+
>>> from transformers import AutoImageProcessor, AutoBackbone
|
530 |
+
>>> import torch
|
531 |
+
>>> from PIL import Image
|
532 |
+
>>> import requests
|
533 |
+
|
534 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
535 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
536 |
+
|
537 |
+
>>> processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
|
538 |
+
>>> model = AutoBackbone.from_pretrained("facebook/convnextv2-tiny-1k-224")
|
539 |
+
|
540 |
+
>>> inputs = processor(image, return_tensors="pt")
|
541 |
+
>>> outputs = model(**inputs)
|
542 |
+
```"""
|
543 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
544 |
+
output_hidden_states = (
|
545 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
546 |
+
)
|
547 |
+
|
548 |
+
embedding_output = self.embeddings(pixel_values)
|
549 |
+
|
550 |
+
outputs = self.encoder(
|
551 |
+
embedding_output,
|
552 |
+
output_hidden_states=True,
|
553 |
+
return_dict=return_dict,
|
554 |
+
)
|
555 |
+
|
556 |
+
hidden_states = outputs.hidden_states if return_dict else outputs[1]
|
557 |
+
|
558 |
+
feature_maps = ()
|
559 |
+
for stage, hidden_state in zip(self.stage_names, hidden_states):
|
560 |
+
if stage in self.out_features:
|
561 |
+
hidden_state = self.hidden_states_norms[stage](hidden_state)
|
562 |
+
feature_maps += (hidden_state,)
|
563 |
+
|
564 |
+
if not return_dict:
|
565 |
+
output = (feature_maps,)
|
566 |
+
if output_hidden_states:
|
567 |
+
output += (hidden_states,)
|
568 |
+
return output
|
569 |
+
|
570 |
+
return BackboneOutput(
|
571 |
+
feature_maps=feature_maps,
|
572 |
+
hidden_states=hidden_states if output_hidden_states else None,
|
573 |
+
attentions=None,
|
574 |
+
)
|
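The backbone only layer-norms and returns the stages listed in `out_features`. A hedged usage sketch, assuming the usual `stem`/`stage1`..`stage4` stage names used by transformers backbones and the same checkpoint as the docstring example above:

```python
import torch
from transformers import AutoBackbone

# Request feature maps from two intermediate stages instead of only the last one.
backbone = AutoBackbone.from_pretrained(
    "facebook/convnextv2-tiny-1k-224", out_features=["stage2", "stage4"]
)

pixel_values = torch.randn(1, 3, 224, 224)
outputs = backbone(pixel_values)
for name, feature_map in zip(backbone.out_features, outputs.feature_maps):
    print(name, tuple(feature_map.shape))  # e.g. stage2 -> (1, 192, 28, 28) for the tiny config
```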
llmeval-env/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_tf_convnextv2.py
ADDED
@@ -0,0 +1,681 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2023 Meta Platforms Inc. and The HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
""" TF 2.0 ConvNextV2 model."""
|
16 |
+
|
17 |
+
|
18 |
+
from __future__ import annotations
|
19 |
+
|
20 |
+
from typing import List, Optional, Tuple, Union
|
21 |
+
|
22 |
+
import numpy as np
|
23 |
+
import tensorflow as tf
|
24 |
+
|
25 |
+
from ...activations_tf import get_tf_activation
|
26 |
+
from ...modeling_tf_outputs import (
|
27 |
+
TFBaseModelOutputWithNoAttention,
|
28 |
+
TFBaseModelOutputWithPooling,
|
29 |
+
TFBaseModelOutputWithPoolingAndNoAttention,
|
30 |
+
TFImageClassifierOutputWithNoAttention,
|
31 |
+
)
|
32 |
+
from ...modeling_tf_utils import (
|
33 |
+
TFModelInputType,
|
34 |
+
TFPreTrainedModel,
|
35 |
+
TFSequenceClassificationLoss,
|
36 |
+
get_initializer,
|
37 |
+
keras,
|
38 |
+
keras_serializable,
|
39 |
+
unpack_inputs,
|
40 |
+
)
|
41 |
+
from ...tf_utils import shape_list
|
42 |
+
from ...utils import (
|
43 |
+
add_code_sample_docstrings,
|
44 |
+
add_start_docstrings,
|
45 |
+
add_start_docstrings_to_model_forward,
|
46 |
+
logging,
|
47 |
+
)
|
48 |
+
from .configuration_convnextv2 import ConvNextV2Config
|
49 |
+
|
50 |
+
|
51 |
+
logger = logging.get_logger(__name__)
|
52 |
+
|
53 |
+
# General docstring
|
54 |
+
_CONFIG_FOR_DOC = "ConvNextV2Config"
|
55 |
+
|
56 |
+
# Base docstring
|
57 |
+
_CHECKPOINT_FOR_DOC = "facebook/convnextv2-tiny-1k-224"
|
58 |
+
_EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]
|
59 |
+
|
60 |
+
# Image classification docstring
|
61 |
+
_IMAGE_CLASS_CHECKPOINT = "facebook/convnextv2-tiny-1k-224"
|
62 |
+
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
|
63 |
+
|
64 |
+
|
65 |
+
# Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextDropPath with ConvNext->ConvNextV2
|
66 |
+
class TFConvNextV2DropPath(keras.layers.Layer):
|
67 |
+
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
|
68 |
+
References:
|
69 |
+
(1) github.com:rwightman/pytorch-image-models
|
70 |
+
"""
|
71 |
+
|
72 |
+
def __init__(self, drop_path: float, **kwargs):
|
73 |
+
super().__init__(**kwargs)
|
74 |
+
self.drop_path = drop_path
|
75 |
+
|
76 |
+
def call(self, x: tf.Tensor, training=None):
|
77 |
+
if training:
|
78 |
+
keep_prob = 1 - self.drop_path
|
79 |
+
shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
|
80 |
+
random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
|
81 |
+
random_tensor = tf.floor(random_tensor)
|
82 |
+
return (x / keep_prob) * random_tensor
|
83 |
+
return x
|
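Stochastic depth drops the whole residual branch for a random subset of samples and rescales the survivors by `1 / keep_prob`, so the expected activation is unchanged. A small self-contained sketch of that mechanism (batch size and drop rate are arbitrary):

```python
import tensorflow as tf

drop_path = 0.2
keep_prob = 1.0 - drop_path
x = tf.ones((8, 4))  # pretend residual-branch activations, one row per sample

# Per-sample binary mask: keep_prob + U(0, 1), floored, equals 1 with probability keep_prob.
random_tensor = keep_prob + tf.random.uniform((8, 1), 0, 1)
mask = tf.floor(random_tensor)
out = (x / keep_prob) * mask

# In expectation the 1/keep_prob rescaling compensates for the dropped samples.
print(float(tf.reduce_mean(out)))  # fluctuates around 1.0
```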
84 |
+
|
85 |
+
|
86 |
+
class TFConvNextV2GRN(keras.layers.Layer):
|
87 |
+
"""GRN (Global Response Normalization) layer"""
|
88 |
+
|
89 |
+
def __init__(self, config: ConvNextV2Config, dim: int, **kwargs):
|
90 |
+
super().__init__(**kwargs)
|
91 |
+
self.dim = dim
|
92 |
+
|
93 |
+
def build(self, input_shape: tf.TensorShape = None):
|
94 |
+
# PT's `nn.Parameters` must be mapped to a TF layer weight to inherit the same name hierarchy (and vice-versa)
|
95 |
+
self.weight = self.add_weight(
|
96 |
+
name="weight",
|
97 |
+
shape=(1, 1, 1, self.dim),
|
98 |
+
initializer=keras.initializers.Zeros(),
|
99 |
+
)
|
100 |
+
self.bias = self.add_weight(
|
101 |
+
name="bias",
|
102 |
+
shape=(1, 1, 1, self.dim),
|
103 |
+
initializer=keras.initializers.Zeros(),
|
104 |
+
)
|
105 |
+
return super().build(input_shape)
|
106 |
+
|
107 |
+
def call(self, hidden_states: tf.Tensor):
|
108 |
+
global_features = tf.norm(hidden_states, ord="euclidean", axis=(1, 2), keepdims=True)
|
109 |
+
norm_features = global_features / (tf.reduce_mean(global_features, axis=-1, keepdims=True) + 1e-6)
|
110 |
+
hidden_states = self.weight * (hidden_states * norm_features) + self.bias + hidden_states
|
111 |
+
return hidden_states
|
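GRN takes a per-channel L2 norm over the spatial axes, divides it by its mean across channels, and uses the result to re-scale the features before the learned affine and the residual term. A standalone sketch of just that normalization step (NHWC layout, made-up dimensions):

```python
import tensorflow as tf

hidden_states = tf.random.normal((2, 7, 7, 16))  # (batch, height, width, channels)

# L2 norm per channel over the spatial axes -> shape (2, 1, 1, 16)
global_features = tf.norm(hidden_states, ord="euclidean", axis=(1, 2), keepdims=True)
# Divide by the channel-wise mean of those norms (epsilon guards against division by zero)
norm_features = global_features / (tf.reduce_mean(global_features, axis=-1, keepdims=True) + 1e-6)

scaled = hidden_states * norm_features  # channels with a large global response are amplified
print(scaled.shape)  # (2, 7, 7, 16)
```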
112 |
+
|
113 |
+
|
114 |
+
# Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextEmbeddings with ConvNext->ConvNextV2
|
115 |
+
class TFConvNextV2Embeddings(keras.layers.Layer):
|
116 |
+
"""This class is comparable to (and inspired by) the SwinEmbeddings class
|
117 |
+
found in src/transformers/models/swin/modeling_swin.py.
|
118 |
+
"""
|
119 |
+
|
120 |
+
def __init__(self, config: ConvNextV2Config, **kwargs):
|
121 |
+
super().__init__(**kwargs)
|
122 |
+
self.patch_embeddings = keras.layers.Conv2D(
|
123 |
+
filters=config.hidden_sizes[0],
|
124 |
+
kernel_size=config.patch_size,
|
125 |
+
strides=config.patch_size,
|
126 |
+
name="patch_embeddings",
|
127 |
+
kernel_initializer=get_initializer(config.initializer_range),
|
128 |
+
bias_initializer=keras.initializers.Zeros(),
|
129 |
+
)
|
130 |
+
self.layernorm = keras.layers.LayerNormalization(epsilon=1e-6, name="layernorm")
|
131 |
+
self.num_channels = config.num_channels
|
132 |
+
self.config = config
|
133 |
+
|
134 |
+
def call(self, pixel_values):
|
135 |
+
if isinstance(pixel_values, dict):
|
136 |
+
pixel_values = pixel_values["pixel_values"]
|
137 |
+
|
138 |
+
tf.debugging.assert_equal(
|
139 |
+
shape_list(pixel_values)[1],
|
140 |
+
self.num_channels,
|
141 |
+
message="Make sure that the channel dimension of the pixel values match with the one set in the configuration.",
|
142 |
+
)
|
143 |
+
|
144 |
+
# When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
|
145 |
+
# So change the input format from `NCHW` to `NHWC`.
|
146 |
+
# shape = (batch_size, in_height, in_width, in_channels)
|
147 |
+
pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
|
148 |
+
|
149 |
+
embeddings = self.patch_embeddings(pixel_values)
|
150 |
+
embeddings = self.layernorm(embeddings)
|
151 |
+
return embeddings
|
152 |
+
|
153 |
+
def build(self, input_shape=None):
|
154 |
+
if self.built:
|
155 |
+
return
|
156 |
+
self.built = True
|
157 |
+
if getattr(self, "patch_embeddings", None) is not None:
|
158 |
+
with tf.name_scope(self.patch_embeddings.name):
|
159 |
+
self.patch_embeddings.build([None, None, None, self.config.num_channels])
|
160 |
+
if getattr(self, "layernorm", None) is not None:
|
161 |
+
with tf.name_scope(self.layernorm.name):
|
162 |
+
self.layernorm.build([None, None, None, self.config.hidden_sizes[0]])
|
163 |
+
|
164 |
+
|
165 |
+
class TFConvNextV2Layer(keras.layers.Layer):
|
166 |
+
"""This corresponds to the `Block` class in the original implementation.
|
167 |
+
|
168 |
+
There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), Conv, GELU, 1x1 Conv]; all in (N, C,
|
169 |
+
H, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back
|
170 |
+
|
171 |
+
The authors used (2) as they find it slightly faster in PyTorch. Since we already permuted the inputs to follow
|
172 |
+
NHWC ordering, we can just apply the operations straight-away without the permutation.
|
173 |
+
|
174 |
+
Args:
|
175 |
+
config (`ConvNextV2Config`):
|
176 |
+
Model configuration class.
|
177 |
+
dim (`int`):
|
178 |
+
Number of input channels.
|
179 |
+
drop_path (`float`, defaults to 0.0):
|
180 |
+
Stochastic depth rate.
|
181 |
+
"""
|
182 |
+
|
183 |
+
def __init__(self, config: ConvNextV2Config, dim: int, drop_path: float = 0.0, **kwargs):
|
184 |
+
super().__init__(**kwargs)
|
185 |
+
self.dim = dim
|
186 |
+
self.config = config
|
187 |
+
self.dwconv = keras.layers.Conv2D(
|
188 |
+
filters=dim,
|
189 |
+
kernel_size=7,
|
190 |
+
padding="same",
|
191 |
+
groups=dim,
|
192 |
+
kernel_initializer=get_initializer(config.initializer_range),
|
193 |
+
bias_initializer=keras.initializers.Zeros(),
|
194 |
+
name="dwconv",
|
195 |
+
) # depthwise conv
|
196 |
+
self.layernorm = keras.layers.LayerNormalization(
|
197 |
+
epsilon=1e-6,
|
198 |
+
name="layernorm",
|
199 |
+
)
|
200 |
+
self.pwconv1 = keras.layers.Dense(
|
201 |
+
units=4 * dim,
|
202 |
+
kernel_initializer=get_initializer(config.initializer_range),
|
203 |
+
bias_initializer=keras.initializers.Zeros(),
|
204 |
+
name="pwconv1",
|
205 |
+
) # pointwise/1x1 convs, implemented with linear layers
|
206 |
+
self.act = get_tf_activation(config.hidden_act)
|
207 |
+
self.grn = TFConvNextV2GRN(config, 4 * dim, dtype=tf.float32, name="grn")
|
208 |
+
self.pwconv2 = keras.layers.Dense(
|
209 |
+
units=dim,
|
210 |
+
kernel_initializer=get_initializer(config.initializer_range),
|
211 |
+
bias_initializer=keras.initializers.Zeros(),
|
212 |
+
name="pwconv2",
|
213 |
+
)
|
214 |
+
# Using `layers.Activation` instead of `tf.identity` to better control `training`
|
215 |
+
# behaviour.
|
216 |
+
self.drop_path = (
|
217 |
+
TFConvNextV2DropPath(drop_path, name="drop_path")
|
218 |
+
if drop_path > 0.0
|
219 |
+
else keras.layers.Activation("linear", name="drop_path")
|
220 |
+
)
|
221 |
+
|
222 |
+
def call(self, hidden_states, training=False):
|
223 |
+
input = hidden_states
|
224 |
+
x = self.dwconv(hidden_states)
|
225 |
+
x = self.layernorm(x)
|
226 |
+
x = self.pwconv1(x)
|
227 |
+
x = self.act(x)
|
228 |
+
x = self.grn(x)
|
229 |
+
x = self.pwconv2(x)
|
230 |
+
x = self.drop_path(x, training=training)
|
231 |
+
x = input + x
|
232 |
+
return x
|
233 |
+
|
234 |
+
def build(self, input_shape=None):
|
235 |
+
if self.built:
|
236 |
+
return
|
237 |
+
self.built = True
|
238 |
+
if getattr(self, "dwconv", None) is not None:
|
239 |
+
with tf.name_scope(self.dwconv.name):
|
240 |
+
self.dwconv.build([None, None, None, self.dim])
|
241 |
+
if getattr(self, "layernorm", None) is not None:
|
242 |
+
with tf.name_scope(self.layernorm.name):
|
243 |
+
self.layernorm.build([None, None, None, self.dim])
|
244 |
+
if getattr(self, "pwconv1", None) is not None:
|
245 |
+
with tf.name_scope(self.pwconv1.name):
|
246 |
+
self.pwconv1.build([None, None, self.dim])
|
247 |
+
if getattr(self, "grn", None) is not None:
|
248 |
+
with tf.name_scope(self.grn.name):
|
249 |
+
self.grn.build(None)
|
250 |
+
if getattr(self, "pwconv2", None) is not None:
|
251 |
+
with tf.name_scope(self.pwconv2.name):
|
252 |
+
self.pwconv2.build([None, None, 4 * self.dim])
|
253 |
+
if getattr(self, "drop_path", None) is not None:
|
254 |
+
with tf.name_scope(self.drop_path.name):
|
255 |
+
self.drop_path.build(None)
|
256 |
+
|
257 |
+
|
258 |
+
# Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextStage with ConvNext->ConvNextV2
|
259 |
+
class TFConvNextV2Stage(keras.layers.Layer):
|
260 |
+
"""ConvNextV2 stage, consisting of an optional downsampling layer + multiple residual blocks.
|
261 |
+
|
262 |
+
Args:
|
263 |
+
config (`ConvNextV2Config`):
|
264 |
+
Model configuration class.
|
265 |
+
in_channels (`int`):
|
266 |
+
Number of input channels.
|
267 |
+
out_channels (`int`):
|
268 |
+
Number of output channels.
|
269 |
+
depth (`int`):
|
270 |
+
Number of residual blocks.
|
271 |
+
drop_path_rates(`List[float]`):
|
272 |
+
Stochastic depth rates for each layer.
|
273 |
+
"""
|
274 |
+
|
275 |
+
def __init__(
|
276 |
+
self,
|
277 |
+
config: ConvNextV2Config,
|
278 |
+
in_channels: int,
|
279 |
+
out_channels: int,
|
280 |
+
kernel_size: int = 2,
|
281 |
+
stride: int = 2,
|
282 |
+
depth: int = 2,
|
283 |
+
drop_path_rates: Optional[List[float]] = None,
|
284 |
+
**kwargs,
|
285 |
+
):
|
286 |
+
super().__init__(**kwargs)
|
287 |
+
if in_channels != out_channels or stride > 1:
|
288 |
+
self.downsampling_layer = [
|
289 |
+
keras.layers.LayerNormalization(
|
290 |
+
epsilon=1e-6,
|
291 |
+
name="downsampling_layer.0",
|
292 |
+
),
|
293 |
+
# Inputs to this layer will follow NHWC format since we
|
294 |
+
# transposed the inputs from NCHW to NHWC in the `TFConvNextV2Embeddings`
|
295 |
+
# layer. All the outputs throughout the model will be in NHWC
|
296 |
+
# from this point on until the output where we again change to
|
297 |
+
# NCHW.
|
298 |
+
keras.layers.Conv2D(
|
299 |
+
filters=out_channels,
|
300 |
+
kernel_size=kernel_size,
|
301 |
+
strides=stride,
|
302 |
+
kernel_initializer=get_initializer(config.initializer_range),
|
303 |
+
bias_initializer=keras.initializers.Zeros(),
|
304 |
+
name="downsampling_layer.1",
|
305 |
+
),
|
306 |
+
]
|
307 |
+
else:
|
308 |
+
self.downsampling_layer = [tf.identity]
|
309 |
+
|
310 |
+
drop_path_rates = drop_path_rates or [0.0] * depth
|
311 |
+
self.layers = [
|
312 |
+
TFConvNextV2Layer(
|
313 |
+
config,
|
314 |
+
dim=out_channels,
|
315 |
+
drop_path=drop_path_rates[j],
|
316 |
+
name=f"layers.{j}",
|
317 |
+
)
|
318 |
+
for j in range(depth)
|
319 |
+
]
|
320 |
+
self.in_channels = in_channels
|
321 |
+
self.out_channels = out_channels
|
322 |
+
self.stride = stride
|
323 |
+
|
324 |
+
def call(self, hidden_states):
|
325 |
+
for layer in self.downsampling_layer:
|
326 |
+
hidden_states = layer(hidden_states)
|
327 |
+
for layer in self.layers:
|
328 |
+
hidden_states = layer(hidden_states)
|
329 |
+
return hidden_states
|
330 |
+
|
331 |
+
def build(self, input_shape=None):
|
332 |
+
if self.built:
|
333 |
+
return
|
334 |
+
self.built = True
|
335 |
+
if getattr(self, "layers", None) is not None:
|
336 |
+
for layer in self.layers:
|
337 |
+
with tf.name_scope(layer.name):
|
338 |
+
layer.build(None)
|
339 |
+
if self.in_channels != self.out_channels or self.stride > 1:
|
340 |
+
with tf.name_scope(self.downsampling_layer[0].name):
|
341 |
+
self.downsampling_layer[0].build([None, None, None, self.in_channels])
|
342 |
+
with tf.name_scope(self.downsampling_layer[1].name):
|
343 |
+
self.downsampling_layer[1].build([None, None, None, self.in_channels])
|
344 |
+
|
345 |
+
|
346 |
+
class TFConvNextV2Encoder(keras.layers.Layer):
|
347 |
+
def __init__(self, config: ConvNextV2Config, **kwargs):
|
348 |
+
super().__init__(**kwargs)
|
349 |
+
self.stages = []
|
350 |
+
drop_path_rates = tf.linspace(0.0, config.drop_path_rate, sum(config.depths))
|
351 |
+
drop_path_rates = tf.split(drop_path_rates, config.depths)
|
352 |
+
drop_path_rates = [x.numpy().tolist() for x in drop_path_rates]
|
353 |
+
prev_chs = config.hidden_sizes[0]
|
354 |
+
for i in range(config.num_stages):
|
355 |
+
out_chs = config.hidden_sizes[i]
|
356 |
+
stage = TFConvNextV2Stage(
|
357 |
+
config,
|
358 |
+
in_channels=prev_chs,
|
359 |
+
out_channels=out_chs,
|
360 |
+
stride=2 if i > 0 else 1,
|
361 |
+
depth=config.depths[i],
|
362 |
+
drop_path_rates=drop_path_rates[i],
|
363 |
+
name=f"stages.{i}",
|
364 |
+
)
|
365 |
+
self.stages.append(stage)
|
366 |
+
prev_chs = out_chs
|
367 |
+
|
368 |
+
def call(
|
369 |
+
self,
|
370 |
+
hidden_states: tf.Tensor,
|
371 |
+
output_hidden_states: Optional[bool] = False,
|
372 |
+
return_dict: Optional[bool] = True,
|
373 |
+
) -> Union[Tuple, TFBaseModelOutputWithNoAttention]:
|
374 |
+
all_hidden_states = () if output_hidden_states else None
|
375 |
+
|
376 |
+
for i, layer_module in enumerate(self.stages):
|
377 |
+
if output_hidden_states:
|
378 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
379 |
+
|
380 |
+
hidden_states = layer_module(hidden_states)
|
381 |
+
|
382 |
+
if output_hidden_states:
|
383 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
384 |
+
|
385 |
+
if not return_dict:
|
386 |
+
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
|
387 |
+
|
388 |
+
return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
|
389 |
+
|
390 |
+
def build(self, input_shape=None):
|
391 |
+
for stage in self.stages:
|
392 |
+
with tf.name_scope(stage.name):
|
393 |
+
stage.build(None)
|
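The encoder's constructor draws one stochastic-depth rate per block from a linear ramp over the total block count and then slices that ramp per stage. A hedged sketch with ConvNeXt-V2-Tiny-like depths (the depths and peak rate here are illustrative):

```python
import tensorflow as tf

depths = [3, 3, 9, 3]    # blocks per stage (tiny-style, illustrative)
drop_path_rate = 0.1     # peak rate, reached by the very last block

rates = tf.linspace(0.0, drop_path_rate, sum(depths))
per_stage = [r.numpy().tolist() for r in tf.split(rates, depths)]
for i, stage_rates in enumerate(per_stage):
    print(f"stage {i}: {[round(r, 3) for r in stage_rates]}")
```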
394 |
+
|
395 |
+
|
396 |
+
@keras_serializable
|
397 |
+
class TFConvNextV2MainLayer(keras.layers.Layer):
|
398 |
+
config_class = ConvNextV2Config
|
399 |
+
|
400 |
+
def __init__(self, config: ConvNextV2Config, **kwargs):
|
401 |
+
super().__init__(**kwargs)
|
402 |
+
|
403 |
+
self.config = config
|
404 |
+
self.embeddings = TFConvNextV2Embeddings(config, name="embeddings")
|
405 |
+
self.encoder = TFConvNextV2Encoder(config, name="encoder")
|
406 |
+
self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
|
407 |
+
# We are setting the `data_format` like so because from here on we will revert to the
|
408 |
+
# NCHW output format
|
409 |
+
self.pooler = keras.layers.GlobalAvgPool2D(data_format="channels_last")
|
410 |
+
|
411 |
+
@unpack_inputs
|
412 |
+
def call(
|
413 |
+
self,
|
414 |
+
pixel_values: TFModelInputType | None = None,
|
415 |
+
output_hidden_states: Optional[bool] = None,
|
416 |
+
return_dict: Optional[bool] = None,
|
417 |
+
training: bool = False,
|
418 |
+
) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
|
419 |
+
output_hidden_states = (
|
420 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
421 |
+
)
|
422 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
423 |
+
|
424 |
+
if pixel_values is None:
|
425 |
+
raise ValueError("You have to specify pixel_values")
|
426 |
+
|
427 |
+
embedding_output = self.embeddings(pixel_values, training=training)
|
428 |
+
|
429 |
+
encoder_outputs = self.encoder(
|
430 |
+
embedding_output,
|
431 |
+
output_hidden_states=output_hidden_states,
|
432 |
+
return_dict=return_dict,
|
433 |
+
training=training,
|
434 |
+
)
|
435 |
+
|
436 |
+
last_hidden_state = encoder_outputs[0]
|
437 |
+
|
438 |
+
# Change to NCHW output format to have uniformity in the modules
|
439 |
+
pooled_output = self.pooler(last_hidden_state)
|
440 |
+
last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
|
441 |
+
pooled_output = self.layernorm(pooled_output)
|
442 |
+
|
443 |
+
# Change the other hidden state outputs to NCHW as well
|
444 |
+
if output_hidden_states:
|
445 |
+
hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
|
446 |
+
|
447 |
+
if not return_dict:
|
448 |
+
hidden_states = hidden_states if output_hidden_states else ()
|
449 |
+
return (last_hidden_state, pooled_output) + hidden_states
|
450 |
+
|
451 |
+
return TFBaseModelOutputWithPoolingAndNoAttention(
|
452 |
+
last_hidden_state=last_hidden_state,
|
453 |
+
pooler_output=pooled_output,
|
454 |
+
hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
|
455 |
+
)
|
456 |
+
|
457 |
+
def build(self, input_shape=None):
|
458 |
+
if self.built:
|
459 |
+
return
|
460 |
+
self.built = True
|
461 |
+
if getattr(self, "embeddings", None) is not None:
|
462 |
+
with tf.name_scope(self.embeddings.name):
|
463 |
+
self.embeddings.build(None)
|
464 |
+
if getattr(self, "encoder", None) is not None:
|
465 |
+
with tf.name_scope(self.encoder.name):
|
466 |
+
self.encoder.build(None)
|
467 |
+
if getattr(self, "layernorm", None) is not None:
|
468 |
+
with tf.name_scope(self.layernorm.name):
|
469 |
+
self.layernorm.build([None, self.config.hidden_sizes[-1]])
|
470 |
+
|
471 |
+
|
472 |
+
class TFConvNextV2PreTrainedModel(TFPreTrainedModel):
|
473 |
+
"""
|
474 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
475 |
+
models.
|
476 |
+
"""
|
477 |
+
|
478 |
+
config_class = ConvNextV2Config
|
479 |
+
base_model_prefix = "convnextv2"
|
480 |
+
main_input_name = "pixel_values"
|
481 |
+
|
482 |
+
|
483 |
+
CONVNEXTV2_START_DOCSTRING = r"""
|
484 |
+
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
|
485 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
486 |
+
etc.)
|
487 |
+
|
488 |
+
This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
|
489 |
+
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
|
490 |
+
behavior.
|
491 |
+
|
492 |
+
<Tip>
|
493 |
+
|
494 |
+
TensorFlow models and layers in `transformers` accept two formats as input:
|
495 |
+
|
496 |
+
- having all inputs as keyword arguments (like PyTorch models), or
|
497 |
+
- having all inputs as a list, tuple or dict in the first positional argument.
|
498 |
+
|
499 |
+
The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
|
500 |
+
and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
|
501 |
+
pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
|
502 |
+
format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
|
503 |
+
the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
|
504 |
+
positional argument:
|
505 |
+
|
506 |
+
- a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`
|
507 |
+
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
|
508 |
+
`model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`
|
509 |
+
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
|
510 |
+
`model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`
|
511 |
+
|
512 |
+
Note that when creating models and layers with
|
513 |
+
[subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
|
514 |
+
about any of this, as you can just pass inputs like you would to any other Python function!
|
515 |
+
|
516 |
+
</Tip>
|
517 |
+
|
518 |
+
Parameters:
|
519 |
+
config ([`ConvNextV2Config`]): Model configuration class with all the parameters of the model.
|
520 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
521 |
+
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
|
522 |
+
"""
|
523 |
+
|
524 |
+
CONVNEXTV2_INPUTS_DOCSTRING = r"""
|
525 |
+
Args:
|
526 |
+
pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
|
527 |
+
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
|
528 |
+
[`ConvNextImageProcessor.__call__`] for details.
|
529 |
+
|
530 |
+
output_hidden_states (`bool`, *optional*):
|
531 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
532 |
+
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
|
533 |
+
used instead.
|
534 |
+
return_dict (`bool`, *optional*):
|
535 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
|
536 |
+
eager mode, in graph mode the value will always be set to `True`.
|
537 |
+
"""
|
538 |
+
|
539 |
+
|
540 |
+
@add_start_docstrings(
|
541 |
+
"The bare ConvNextV2 model outputting raw features without any specific head on top.",
|
542 |
+
CONVNEXTV2_START_DOCSTRING,
|
543 |
+
)
|
544 |
+
class TFConvNextV2Model(TFConvNextV2PreTrainedModel):
|
545 |
+
def __init__(self, config: ConvNextV2Config, *inputs, **kwargs):
|
546 |
+
super().__init__(config, *inputs, **kwargs)
|
547 |
+
self.convnextv2 = TFConvNextV2MainLayer(config, name="convnextv2")
|
548 |
+
|
549 |
+
@unpack_inputs
|
550 |
+
@add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
|
551 |
+
@add_code_sample_docstrings(
|
552 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
553 |
+
output_type=TFBaseModelOutputWithPoolingAndNoAttention,
|
554 |
+
config_class=_CONFIG_FOR_DOC,
|
555 |
+
modality="vision",
|
556 |
+
expected_output=_EXPECTED_OUTPUT_SHAPE,
|
557 |
+
)
|
558 |
+
def call(
|
559 |
+
self,
|
560 |
+
pixel_values: TFModelInputType | None = None,
|
561 |
+
output_hidden_states: Optional[bool] = None,
|
562 |
+
return_dict: Optional[bool] = None,
|
563 |
+
training: bool = False,
|
564 |
+
) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
|
565 |
+
output_hidden_states = (
|
566 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
567 |
+
)
|
568 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
569 |
+
|
570 |
+
if pixel_values is None:
|
571 |
+
raise ValueError("You have to specify pixel_values")
|
572 |
+
|
573 |
+
outputs = self.convnextv2(
|
574 |
+
pixel_values=pixel_values,
|
575 |
+
output_hidden_states=output_hidden_states,
|
576 |
+
return_dict=return_dict,
|
577 |
+
training=training,
|
578 |
+
)
|
579 |
+
|
580 |
+
if not return_dict:
|
581 |
+
return outputs[:]
|
582 |
+
|
583 |
+
return TFBaseModelOutputWithPoolingAndNoAttention(
|
584 |
+
last_hidden_state=outputs.last_hidden_state,
|
585 |
+
pooler_output=outputs.pooler_output,
|
586 |
+
hidden_states=outputs.hidden_states,
|
587 |
+
)
|
588 |
+
|
589 |
+
def build(self, input_shape=None):
|
590 |
+
if self.built:
|
591 |
+
return
|
592 |
+
self.built = True
|
593 |
+
if getattr(self, "convnextv2", None) is not None:
|
594 |
+
with tf.name_scope(self.convnextv2.name):
|
595 |
+
self.convnextv2.build(None)
|
596 |
+
|
597 |
+
|
598 |
+
@add_start_docstrings(
|
599 |
+
"""
|
600 |
+
ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
|
601 |
+
ImageNet.
|
602 |
+
""",
|
603 |
+
CONVNEXTV2_START_DOCSTRING,
|
604 |
+
)
|
605 |
+
class TFConvNextV2ForImageClassification(TFConvNextV2PreTrainedModel, TFSequenceClassificationLoss):
|
606 |
+
def __init__(self, config: ConvNextV2Config, *inputs, **kwargs):
|
607 |
+
super().__init__(config, *inputs, **kwargs)
|
608 |
+
|
609 |
+
self.num_labels = config.num_labels
|
610 |
+
self.convnextv2 = TFConvNextV2MainLayer(config, name="convnextv2")
|
611 |
+
|
612 |
+
# Classifier head
|
613 |
+
self.classifier = keras.layers.Dense(
|
614 |
+
units=config.num_labels,
|
615 |
+
kernel_initializer=get_initializer(config.initializer_range),
|
616 |
+
bias_initializer=keras.initializers.Zeros(),
|
617 |
+
name="classifier",
|
618 |
+
)
|
619 |
+
|
620 |
+
@unpack_inputs
|
621 |
+
@add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
|
622 |
+
@add_code_sample_docstrings(
|
623 |
+
checkpoint=_IMAGE_CLASS_CHECKPOINT,
|
624 |
+
output_type=TFImageClassifierOutputWithNoAttention,
|
625 |
+
config_class=_CONFIG_FOR_DOC,
|
626 |
+
expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
|
627 |
+
)
|
628 |
+
def call(
|
629 |
+
self,
|
630 |
+
pixel_values: TFModelInputType | None = None,
|
631 |
+
output_hidden_states: Optional[bool] = None,
|
632 |
+
return_dict: Optional[bool] = None,
|
633 |
+
labels: np.ndarray | tf.Tensor | None = None,
|
634 |
+
training: Optional[bool] = False,
|
635 |
+
) -> Union[TFImageClassifierOutputWithNoAttention, Tuple[tf.Tensor]]:
|
636 |
+
r"""
|
637 |
+
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
|
638 |
+
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
|
639 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
640 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
641 |
+
"""
|
642 |
+
output_hidden_states = (
|
643 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
644 |
+
)
|
645 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
646 |
+
|
647 |
+
if pixel_values is None:
|
648 |
+
raise ValueError("You have to specify pixel_values")
|
649 |
+
|
650 |
+
outputs = self.convnextv2(
|
651 |
+
pixel_values,
|
652 |
+
output_hidden_states=output_hidden_states,
|
653 |
+
return_dict=return_dict,
|
654 |
+
training=training,
|
655 |
+
)
|
656 |
+
|
657 |
+
pooled_output = outputs.pooler_output if return_dict else outputs[1]
|
658 |
+
|
659 |
+
logits = self.classifier(pooled_output)
|
660 |
+
loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
|
661 |
+
|
662 |
+
if not return_dict:
|
663 |
+
output = (logits,) + outputs[2:]
|
664 |
+
return ((loss,) + output) if loss is not None else output
|
665 |
+
|
666 |
+
return TFImageClassifierOutputWithNoAttention(
|
667 |
+
loss=loss,
|
668 |
+
logits=logits,
|
669 |
+
hidden_states=outputs.hidden_states,
|
670 |
+
)
|
671 |
+
|
672 |
+
def build(self, input_shape=None):
|
673 |
+
if self.built:
|
674 |
+
return
|
675 |
+
self.built = True
|
676 |
+
if getattr(self, "convnextv2", None) is not None:
|
677 |
+
with tf.name_scope(self.convnextv2.name):
|
678 |
+
self.convnextv2.build(None)
|
679 |
+
if getattr(self, "classifier", None) is not None:
|
680 |
+
with tf.name_scope(self.classifier.name):
|
681 |
+
self.classifier.build([None, None, self.config.hidden_sizes[-1]])
|
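Beyond the auto-generated docstrings, neither TF head in this file carries a usage snippet. A hedged end-to-end sketch with the same checkpoint used for the doc examples (only the expected shape and label are noted, no values are asserted):

```python
import tensorflow as tf
import requests
from PIL import Image
from transformers import AutoImageProcessor, TFConvNextV2ForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = TFConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")

inputs = processor(image, return_tensors="tf")
logits = model(**inputs).logits  # (1, 1000) for this ImageNet-1k checkpoint
predicted_class = int(tf.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_class])  # expected: "tabby, tabby cat"
```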
llmeval-env/lib/python3.10/site-packages/transformers/models/dpt/convert_dinov2_depth_to_hf.py
ADDED
@@ -0,0 +1,384 @@
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2023 The HuggingFace Inc. team.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
"""Convert DINOv2 + DPT checkpoints from the original repository. URL:
|
16 |
+
https://github.com/facebookresearch/dinov2/tree/main"""
|
17 |
+
|
18 |
+
|
19 |
+
import argparse
|
20 |
+
import itertools
|
21 |
+
import math
|
22 |
+
from pathlib import Path
|
23 |
+
|
24 |
+
import requests
|
25 |
+
import torch
|
26 |
+
from PIL import Image
|
27 |
+
from torchvision import transforms
|
28 |
+
|
29 |
+
from transformers import Dinov2Config, DPTConfig, DPTForDepthEstimation, DPTImageProcessor
|
30 |
+
from transformers.utils import logging
|
31 |
+
|
32 |
+
|
33 |
+
logging.set_verbosity_info()
|
34 |
+
logger = logging.get_logger(__name__)
|
35 |
+
|
36 |
+
|
37 |
+
def get_dpt_config(model_name):
|
38 |
+
if "small" in model_name:
|
39 |
+
# equivalent to stage 3, stage 6, stage 9, stage 12
|
40 |
+
backbone_config = Dinov2Config.from_pretrained(
|
41 |
+
"facebook/dinov2-small", out_indices=[3, 6, 9, 12], apply_layernorm=False, reshape_hidden_states=False
|
42 |
+
)
|
43 |
+
neck_hidden_sizes = [48, 96, 192, 384]
|
44 |
+
elif "base" in model_name:
|
45 |
+
backbone_config = Dinov2Config.from_pretrained(
|
46 |
+
"facebook/dinov2-base", out_indices=[3, 6, 9, 12], apply_layernorm=False, reshape_hidden_states=False
|
47 |
+
)
|
48 |
+
neck_hidden_sizes = [96, 192, 384, 768]
|
49 |
+
elif "large" in model_name:
|
50 |
+
backbone_config = Dinov2Config.from_pretrained(
|
51 |
+
"facebook/dinov2-large", out_indices=[5, 12, 18, 24], apply_layernorm=False, reshape_hidden_states=False
|
52 |
+
)
|
53 |
+
neck_hidden_sizes = [128, 256, 512, 1024]
|
54 |
+
elif "giant" in model_name:
|
55 |
+
backbone_config = Dinov2Config.from_pretrained(
|
56 |
+
"facebook/dinov2-giant", out_indices=[10, 20, 30, 40], apply_layernorm=False, reshape_hidden_states=False
|
57 |
+
)
|
58 |
+
neck_hidden_sizes = [192, 384, 768, 1536]
|
59 |
+
else:
|
60 |
+
raise NotImplementedError("To do")
|
61 |
+
|
62 |
+
config = DPTConfig(
|
63 |
+
backbone_config=backbone_config,
|
64 |
+
neck_hidden_sizes=neck_hidden_sizes,
|
65 |
+
use_bias_in_fusion_residual=False,
|
66 |
+
add_projection=True,
|
67 |
+
)
|
68 |
+
|
69 |
+
return config
|
70 |
+
|
71 |
+
|
72 |
+
# here we list all DPT keys to be renamed (original name on the left, our name on the right)
|
73 |
+
def create_rename_keys_dpt(config):
|
74 |
+
rename_keys = []
|
75 |
+
|
76 |
+
# fmt: off
|
77 |
+
# activation postprocessing (projections, readout projections + resize blocks)
|
78 |
+
for i in range(4):
|
79 |
+
rename_keys.append((f"decode_head.reassemble_blocks.projects.{i}.conv.weight", f"neck.reassemble_stage.layers.{i}.projection.weight"))
|
80 |
+
rename_keys.append((f"decode_head.reassemble_blocks.projects.{i}.conv.bias", f"neck.reassemble_stage.layers.{i}.projection.bias"))
|
81 |
+
|
82 |
+
rename_keys.append((f"decode_head.reassemble_blocks.readout_projects.{i}.0.weight", f"neck.reassemble_stage.readout_projects.{i}.0.weight"))
|
83 |
+
rename_keys.append((f"decode_head.reassemble_blocks.readout_projects.{i}.0.bias", f"neck.reassemble_stage.readout_projects.{i}.0.bias"))
|
84 |
+
|
85 |
+
if i != 2:
|
86 |
+
rename_keys.append((f"decode_head.reassemble_blocks.resize_layers.{i}.weight", f"neck.reassemble_stage.layers.{i}.resize.weight"))
|
87 |
+
rename_keys.append((f"decode_head.reassemble_blocks.resize_layers.{i}.bias", f"neck.reassemble_stage.layers.{i}.resize.bias"))
|
88 |
+
|
89 |
+
# fusion layers
|
90 |
+
for i in range(4):
|
91 |
+
rename_keys.append((f"decode_head.fusion_blocks.{i}.project.conv.weight", f"neck.fusion_stage.layers.{i}.projection.weight"))
|
92 |
+
rename_keys.append((f"decode_head.fusion_blocks.{i}.project.conv.bias", f"neck.fusion_stage.layers.{i}.projection.bias"))
|
93 |
+
if i != 0:
|
94 |
+
rename_keys.append((f"decode_head.fusion_blocks.{i}.res_conv_unit1.conv1.conv.weight", f"neck.fusion_stage.layers.{i}.residual_layer1.convolution1.weight"))
|
95 |
+
rename_keys.append((f"decode_head.fusion_blocks.{i}.res_conv_unit1.conv2.conv.weight", f"neck.fusion_stage.layers.{i}.residual_layer1.convolution2.weight"))
|
96 |
+
rename_keys.append((f"decode_head.fusion_blocks.{i}.res_conv_unit2.conv1.conv.weight", f"neck.fusion_stage.layers.{i}.residual_layer2.convolution1.weight"))
|
97 |
+
rename_keys.append((f"decode_head.fusion_blocks.{i}.res_conv_unit2.conv2.conv.weight", f"neck.fusion_stage.layers.{i}.residual_layer2.convolution2.weight"))
|
98 |
+
|
99 |
+
# neck convolutions
|
100 |
+
for i in range(4):
|
101 |
+
rename_keys.append((f"decode_head.convs.{i}.conv.weight", f"neck.convs.{i}.weight"))
|
102 |
+
|
103 |
+
# head
|
104 |
+
rename_keys.append(("decode_head.project.conv.weight", "head.projection.weight"))
|
105 |
+
rename_keys.append(("decode_head.project.conv.bias", "head.projection.bias"))
|
106 |
+
|
107 |
+
for i in range(0, 5, 2):
|
108 |
+
rename_keys.append((f"decode_head.conv_depth.head.{i}.weight", f"head.head.{i}.weight"))
|
109 |
+
rename_keys.append((f"decode_head.conv_depth.head.{i}.bias", f"head.head.{i}.bias"))
|
110 |
+
# fmt: on
|
111 |
+
|
112 |
+
return rename_keys
|
113 |
+
|
114 |
+
|
115 |
+
# here we list all backbone keys to be renamed (original name on the left, our name on the right)
|
116 |
+
def create_rename_keys_backbone(config):
|
117 |
+
rename_keys = []
|
118 |
+
|
119 |
+
# fmt: off
|
120 |
+
# patch embedding layer
|
121 |
+
rename_keys.append(("cls_token", "backbone.embeddings.cls_token"))
|
122 |
+
rename_keys.append(("mask_token", "backbone.embeddings.mask_token"))
|
123 |
+
rename_keys.append(("pos_embed", "backbone.embeddings.position_embeddings"))
|
124 |
+
rename_keys.append(("patch_embed.proj.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
|
125 |
+
rename_keys.append(("patch_embed.proj.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
|
126 |
+
|
127 |
+
# Transformer encoder
|
128 |
+
for i in range(config.backbone_config.num_hidden_layers):
|
129 |
+
# layernorms
|
130 |
+
rename_keys.append((f"blocks.{i}.norm1.weight", f"backbone.encoder.layer.{i}.norm1.weight"))
|
131 |
+
rename_keys.append((f"blocks.{i}.norm1.bias", f"backbone.encoder.layer.{i}.norm1.bias"))
|
132 |
+
rename_keys.append((f"blocks.{i}.norm2.weight", f"backbone.encoder.layer.{i}.norm2.weight"))
|
133 |
+
rename_keys.append((f"blocks.{i}.norm2.bias", f"backbone.encoder.layer.{i}.norm2.bias"))
|
134 |
+
# MLP
|
135 |
+
if config.backbone_config.use_swiglu_ffn:
|
136 |
+
rename_keys.append((f"blocks.{i}.mlp.w12.weight", f"backbone.encoder.layer.{i}.mlp.w12.weight"))
|
137 |
+
rename_keys.append((f"blocks.{i}.mlp.w12.bias", f"backbone.encoder.layer.{i}.mlp.w12.bias"))
|
138 |
+
rename_keys.append((f"blocks.{i}.mlp.w3.weight", f"backbone.encoder.layer.{i}.mlp.w3.weight"))
|
139 |
+
rename_keys.append((f"blocks.{i}.mlp.w3.bias", f"backbone.encoder.layer.{i}.mlp.w3.bias"))
|
140 |
+
else:
|
141 |
+
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"backbone.encoder.layer.{i}.mlp.fc1.weight"))
|
142 |
+
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"backbone.encoder.layer.{i}.mlp.fc1.bias"))
|
143 |
+
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"backbone.encoder.layer.{i}.mlp.fc2.weight"))
|
144 |
+
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"backbone.encoder.layer.{i}.mlp.fc2.bias"))
|
145 |
+
# layerscale
|
146 |
+
rename_keys.append((f"blocks.{i}.ls1.gamma", f"backbone.encoder.layer.{i}.layer_scale1.lambda1"))
|
147 |
+
rename_keys.append((f"blocks.{i}.ls2.gamma", f"backbone.encoder.layer.{i}.layer_scale2.lambda1"))
|
148 |
+
# attention projection layer
|
149 |
+
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"backbone.encoder.layer.{i}.attention.output.dense.weight"))
|
150 |
+
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"backbone.encoder.layer.{i}.attention.output.dense.bias"))
|
151 |
+
# fmt: on
|
152 |
+
|
153 |
+
rename_keys.append(("norm.weight", "backbone.layernorm.weight"))
|
154 |
+
rename_keys.append(("norm.bias", "backbone.layernorm.bias"))
|
155 |
+
|
156 |
+
return rename_keys
|
157 |
+
|
158 |
+
|
159 |
+
# we split up the matrix of each encoder layer into queries, keys and values
|
160 |
+
def read_in_q_k_v(state_dict, config):
|
161 |
+
for i in range(config.backbone_config.num_hidden_layers):
|
162 |
+
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
|
163 |
+
in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
|
164 |
+
in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
|
165 |
+
hidden_size = config.backbone_config.hidden_size
|
166 |
+
# next, add query, keys and values (in that order) to the state dict
|
167 |
+
state_dict[f"backbone.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[:hidden_size, :]
|
168 |
+
state_dict[f"backbone.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[:hidden_size]
|
169 |
+
state_dict[f"backbone.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
|
170 |
+
hidden_size : hidden_size * 2, :
|
171 |
+
]
|
172 |
+
state_dict[f"backbone.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
|
173 |
+
hidden_size : hidden_size * 2
|
174 |
+
]
|
175 |
+
state_dict[f"backbone.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-hidden_size:, :]
|
176 |
+
state_dict[f"backbone.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-hidden_size:]
|
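The helper above splits timm's fused `qkv` projection, a single `(3 * hidden_size, hidden_size)` matrix, into separate query/key/value weights. A toy-sized sketch of the same slicing so the index arithmetic is easy to check (dimensions are made up):

```python
import torch

hidden_size = 4
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
    3 * hidden_size, hidden_size
)

query_w = in_proj_weight[:hidden_size, :]                 # rows 0..3
key_w = in_proj_weight[hidden_size : hidden_size * 2, :]  # rows 4..7
value_w = in_proj_weight[-hidden_size:, :]                # rows 8..11

# Concatenating the three slices reproduces the fused matrix exactly.
assert torch.equal(torch.cat([query_w, key_w, value_w]), in_proj_weight)
```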
177 |
+
|
178 |
+
|
179 |
+
def rename_key(dct, old, new):
|
180 |
+
val = dct.pop(old)
|
181 |
+
dct[new] = val
|
182 |
+
|
183 |
+
|
184 |
+
# We will verify our results on an image of cute cats
|
185 |
+
def prepare_img():
|
186 |
+
url = "https://dl.fbaipublicfiles.com/dinov2/images/example.jpg"
|
187 |
+
im = Image.open(requests.get(url, stream=True).raw)
|
188 |
+
return im
|
189 |
+
|
190 |
+
|
191 |
+
name_to_url = {
|
192 |
+
"dpt-dinov2-small-nyu": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_nyu_dpt_head.pth",
|
193 |
+
"dpt-dinov2-small-kitti": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_kitti_dpt_head.pth",
|
194 |
+
"dpt-dinov2-base-nyu": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_nyu_dpt_head.pth",
|
195 |
+
"dpt-dinov2-base-kitti": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_kitti_dpt_head.pth",
|
196 |
+
"dpt-dinov2-large-nyu": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_nyu_dpt_head.pth",
|
197 |
+
"dpt-dinov2-large-kitti": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_kitti_dpt_head.pth",
|
198 |
+
"dpt-dinov2-giant-nyu": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_nyu_dpt_head.pth",
|
199 |
+
"dpt-dinov2-giant-kitti": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_kitti_dpt_head.pth",
|
200 |
+
}
|
201 |
+
|
202 |
+
|
203 |
+
def get_original_pixel_values(image):
|
204 |
+
class CenterPadding(object):
|
205 |
+
def __init__(self, multiple):
|
206 |
+
super().__init__()
|
207 |
+
self.multiple = multiple
|
208 |
+
|
209 |
+
def _get_pad(self, size):
|
210 |
+
new_size = math.ceil(size / self.multiple) * self.multiple
|
211 |
+
pad_size = new_size - size
|
212 |
+
pad_size_left = pad_size // 2
|
213 |
+
pad_size_right = pad_size - pad_size_left
|
214 |
+
return pad_size_left, pad_size_right
|
215 |
+
|
216 |
+
def __call__(self, img):
|
217 |
+
pads = list(itertools.chain.from_iterable(self._get_pad(m) for m in img.shape[-2:][::-1]))
|
218 |
+
output = torch.nn.functional.pad(img, pads)
|
219 |
+
return output
|
220 |
+
|
221 |
+
def __repr__(self):
|
222 |
+
return self.__class__.__name__ + "()"
|
223 |
+
|
224 |
+
def make_depth_transform() -> transforms.Compose:
|
225 |
+
return transforms.Compose(
|
226 |
+
[
|
227 |
+
transforms.ToTensor(),
|
228 |
+
lambda x: 255.0 * x[:3], # Discard alpha component and scale by 255
|
229 |
+
transforms.Normalize(
|
230 |
+
mean=(123.675, 116.28, 103.53),
|
231 |
+
std=(58.395, 57.12, 57.375),
|
232 |
+
),
|
233 |
+
CenterPadding(multiple=14),
|
234 |
+
]
|
235 |
+
)
|
236 |
+
|
237 |
+
transform = make_depth_transform()
|
238 |
+
original_pixel_values = transform(image).unsqueeze(0)
|
239 |
+
|
240 |
+
return original_pixel_values
|
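`CenterPadding` pads each spatial dimension up to the next multiple of the patch size (14), splitting the padding as evenly as possible between the two sides. A quick sketch of just that arithmetic (the input size is arbitrary):

```python
import math


def center_pad_amounts(size: int, multiple: int = 14) -> tuple:
    # Round up to the next multiple, then split the slack between left and right.
    new_size = math.ceil(size / multiple) * multiple
    pad = new_size - size
    left = pad // 2
    return left, pad - left


print(center_pad_amounts(481))  # (4, 5): 481 + 9 = 490, a multiple of 14
```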
241 |
+
|
242 |
+
|
243 |
+
@torch.no_grad()
|
244 |
+
def convert_dpt_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub, verify_logits):
|
245 |
+
"""
|
246 |
+
Copy/paste/tweak model's weights to our DPT structure.
|
247 |
+
"""
|
248 |
+
|
249 |
+
# define DPT configuration based on URL
|
250 |
+
checkpoint_url = name_to_url[model_name]
|
251 |
+
config = get_dpt_config(model_name)
|
252 |
+
|
253 |
+
# load original DPT state_dict from URL
|
254 |
+
print("URL:", checkpoint_url)
|
255 |
+
dpt_state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
|
256 |
+
# rename keys
|
257 |
+
rename_keys = create_rename_keys_dpt(config)
|
258 |
+
for src, dest in rename_keys:
|
259 |
+
rename_key(dpt_state_dict, src, dest)
|
260 |
+
|
261 |
+
# load original backbone state_dict from URL
|
262 |
+
if "small" in model_name:
|
263 |
+
original_model = torch.hub.load("facebookresearch/dinov2", "dinov2_vits14")
|
264 |
+
elif "base" in model_name:
|
265 |
+
original_model = torch.hub.load("facebookresearch/dinov2", "dinov2_vitb14")
|
266 |
+
elif "large" in model_name:
|
267 |
+
original_model = torch.hub.load("facebookresearch/dinov2", "dinov2_vitl14")
|
268 |
+
elif "giant" in model_name:
|
269 |
+
original_model = torch.hub.load("facebookresearch/dinov2", "dinov2_vitg14")
|
270 |
+
else:
|
271 |
+
raise NotImplementedError("To do")
|
272 |
+
original_model.eval()
|
273 |
+
backbone_state_dict = original_model.state_dict()
|
274 |
+
|
275 |
+
# rename keys
|
276 |
+
rename_keys = create_rename_keys_backbone(config)
|
277 |
+
for src, dest in rename_keys:
|
278 |
+
rename_key(backbone_state_dict, src, dest)
|
279 |
+
|
280 |
+
# read in qkv matrices
|
281 |
+
read_in_q_k_v(backbone_state_dict, config)
|
282 |
+
|
283 |
+
for key, val in backbone_state_dict.copy().items():
|
284 |
+
val = backbone_state_dict.pop(key)
|
285 |
+
if "w12" in key:
|
286 |
+
key = key.replace("w12", "weights_in")
|
287 |
+
if "w3" in key:
|
288 |
+
key = key.replace("w3", "weights_out")
|
289 |
+
backbone_state_dict[key] = val
|
290 |
+
|
291 |
+
# merge state_dicts
|
292 |
+
state_dict = {**backbone_state_dict, **dpt_state_dict}
|
293 |
+
|
294 |
+
# load HuggingFace model
|
295 |
+
model = DPTForDepthEstimation(config)
|
296 |
+
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
|
297 |
+
print("Missing keys:", missing_keys)
|
298 |
+
print("Unexpected keys:", unexpected_keys)
|
299 |
+
assert missing_keys == [
|
300 |
+
"neck.fusion_stage.layers.0.residual_layer1.convolution1.weight",
|
301 |
+
"neck.fusion_stage.layers.0.residual_layer1.convolution2.weight",
|
302 |
+
]
|
303 |
+
model.eval()
|
304 |
+
|
305 |
+
# Verify image processor
|
306 |
+
processor = DPTImageProcessor(
|
307 |
+
do_resize=False,
|
308 |
+
do_rescale=False,
|
309 |
+
do_pad=True,
|
310 |
+
size_divisor=14,
|
311 |
+
do_normalize=True,
|
312 |
+
image_mean=(123.675, 116.28, 103.53),
|
313 |
+
image_std=(58.395, 57.12, 57.375),
|
314 |
+
)
|
315 |
+
|
316 |
+
image = prepare_img()
|
317 |
+
pixel_values = processor(image, return_tensors="pt").pixel_values.float()
|
318 |
+
original_pixel_values = get_original_pixel_values(image)
|
319 |
+
|
320 |
+
assert torch.allclose(pixel_values, original_pixel_values)
|
321 |
+
|
322 |
+
# Verify forward pass
|
323 |
+
with torch.no_grad():
|
324 |
+
outputs = model(pixel_values)
|
325 |
+
|
326 |
+
predicted_depth = outputs.predicted_depth
|
327 |
+
|
328 |
+
print("Shape of predicted depth:", predicted_depth.shape)
|
329 |
+
print("First values of predicted depth:", predicted_depth[0, :3, :3])
|
330 |
+
|
331 |
+
# assert logits
|
332 |
+
if verify_logits:
|
333 |
+
if model_name == "dpt-dinov2-small-nyu":
|
334 |
+
expected_shape = torch.Size([1, 576, 736])
|
335 |
+
expected_slice = torch.tensor(
|
336 |
+
[[3.3576, 3.4741, 3.4345], [3.4324, 3.5012, 3.2775], [3.2560, 3.3563, 3.2354]]
|
337 |
+
)
|
338 |
+
|
339 |
+
assert predicted_depth.shape == torch.Size(expected_shape)
|
340 |
+
assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-5)
|
341 |
+
print("Looks ok!")
|
342 |
+
|
343 |
+
if pytorch_dump_folder_path is not None:
|
344 |
+
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
|
345 |
+
print(f"Saving model and processor to {pytorch_dump_folder_path}")
|
346 |
+
model.save_pretrained(pytorch_dump_folder_path)
|
347 |
+
processor.save_pretrained(pytorch_dump_folder_path)
|
348 |
+
|
349 |
+
if push_to_hub:
|
350 |
+
print("Pushing model and processor to hub...")
|
351 |
+
model.push_to_hub(repo_id=f"facebook/{model_name}")
|
352 |
+
processor.push_to_hub(repo_id=f"facebook/{model_name}")
|
353 |
+
|
354 |
+
|
355 |
+
if __name__ == "__main__":
|
356 |
+
parser = argparse.ArgumentParser()
|
357 |
+
# Required parameters
|
358 |
+
parser.add_argument(
|
359 |
+
"--model_name",
|
360 |
+
default="dpt-dinov2-small-nyu",
|
361 |
+
type=str,
|
362 |
+
choices=name_to_url.keys(),
|
363 |
+
help="Name of the model you'd like to convert.",
|
364 |
+
)
|
365 |
+
parser.add_argument(
|
366 |
+
"--pytorch_dump_folder_path",
|
367 |
+
default=None,
|
368 |
+
type=str,
|
369 |
+
help="Path to the output PyTorch model directory.",
|
370 |
+
)
|
371 |
+
parser.add_argument(
|
372 |
+
"--push_to_hub",
|
373 |
+
action="store_true",
|
374 |
+
help="Whether to push the model to the hub after conversion.",
|
375 |
+
)
|
376 |
+
parser.add_argument(
|
377 |
+
"--verify_logits",
|
378 |
+
action="store_true",
|
379 |
+
required=False,
|
380 |
+
help="Path to the output PyTorch model directory.",
|
381 |
+
)
|
382 |
+
|
383 |
+
args = parser.parse_args()
|
384 |
+
convert_dpt_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.verify_logits)
|
llmeval-env/lib/python3.10/site-packages/transformers/models/ernie/__init__.py
ADDED
@@ -0,0 +1,70 @@
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available


_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

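A small usage sketch of what the lazy-module wiring above provides: the ERNIE classes resolve through the top-level `transformers` namespace only when first accessed (assuming PyTorch is installed; otherwise only the configuration symbols are registered).

from transformers import ErnieConfig, ErnieModel

config = ErnieConfig(num_hidden_layers=2)  # deliberately tiny, just to instantiate quickly
model = ErnieModel(config)
print(type(model).__module__)  # transformers.models.ernie.modeling_ernie
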
llmeval-env/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.21 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/configuration_ernie.cpython-310.pyc
ADDED
Binary file (6.89 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/modeling_ernie.cpython-310.pyc
ADDED
Binary file (52.9 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/ernie/configuration_ernie.py
ADDED
@@ -0,0 +1,162 @@
# coding=utf-8
# Copyright 2022 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ERNIE model configuration"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class ErnieConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ErnieModel`] or a [`TFErnieModel`]. It is used
    to instantiate an ERNIE model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the ERNIE
    [nghuyong/ernie-3.0-base-zh](https://huggingface.co/nghuyong/ernie-3.0-base-zh) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the ERNIE model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`ErnieModel`] or [`TFErnieModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`ErnieModel`] or [`TFErnieModel`].
        task_type_vocab_size (`int`, *optional*, defaults to 3):
            The vocabulary size of the `task_type_ids` for the ERNIE2.0/ERNIE3.0 model.
        use_task_id (`bool`, *optional*, defaults to `False`):
            Whether or not the model supports `task_type_ids`.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.

    Examples:

    ```python
    >>> from transformers import ErnieConfig, ErnieModel

    >>> # Initializing an ERNIE nghuyong/ernie-3.0-base-zh style configuration
    >>> configuration = ErnieConfig()

    >>> # Initializing a model (with random weights) from the nghuyong/ernie-3.0-base-zh style configuration
    >>> model = ErnieModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "ernie"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        task_type_vocab_size=3,
        use_task_id=False,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.task_type_vocab_size = task_type_vocab_size
        self.use_task_id = use_task_id
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class ErnieOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
                ("task_type_ids", dynamic_axis),
            ]
        )

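As an illustrative sketch (not part of the file above): the `inputs` property of the ONNX config always advertises `task_type_ids` as a dynamic-axis input, whether or not `use_task_id` is enabled on the model config.

from transformers import ErnieConfig
from transformers.models.ernie.configuration_ernie import ErnieOnnxConfig

onnx_config = ErnieOnnxConfig(ErnieConfig(use_task_id=True))
print(list(onnx_config.inputs.keys()))
# ['input_ids', 'attention_mask', 'token_type_ids', 'task_type_ids']
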
llmeval-env/lib/python3.10/site-packages/transformers/models/ernie/modeling_ernie.py
ADDED
@@ -0,0 +1,1820 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ERNIE model."""


import math
import warnings
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    NextSentencePredictorOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_ernie import ErnieConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "nghuyong/ernie-1.0-base-zh"
_CONFIG_FOR_DOC = "ErnieConfig"


from ..deprecated._archive_maps import ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


class ErnieEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        self.use_task_id = config.use_task_id
        if config.use_task_id:
            self.task_type_embeddings = nn.Embedding(config.task_type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        self.register_buffer(
            "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        task_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values_length: int = 0,
    ) -> torch.Tensor:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
        # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
        # issue #5664
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings

        # add `task_type_id` for ERNIE model
        if self.use_task_id:
            if task_type_ids is None:
                task_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
            task_type_embeddings = self.task_type_embeddings(task_type_ids)
            embeddings += task_type_embeddings

        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings

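A quick, hedged sketch of the task-type path in `ErnieEmbeddings` above, exercised in isolation (class and argument names as defined in this file; the token ids are arbitrary placeholders):

import torch
from transformers import ErnieConfig
from transformers.models.ernie.modeling_ernie import ErnieEmbeddings

config = ErnieConfig(use_task_id=True, task_type_vocab_size=3)
emb = ErnieEmbeddings(config)
input_ids = torch.tensor([[101, 2769, 102]])  # any ids < config.vocab_size
task_type_ids = torch.ones_like(input_ids)    # give every token task id 1
out = emb(input_ids=input_ids, task_type_ids=task_type_ids)
print(out.shape)  # torch.Size([1, 3, 768])
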
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Ernie
class ErnieSelfAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        use_cache = past_key_value is not None
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            query_length, key_length = query_layer.shape[2], key_layer.shape[2]
            if use_cache:
                position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
                    -1, 1
                )
            else:
                position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r

            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in ErnieModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs

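To make the head-splitting in `transpose_for_scores` above concrete, here is a standalone shape sketch (illustrative sizes only, not taken from the source):

import torch

batch, seq, hidden, num_heads = 2, 5, 768, 12
head_size = hidden // num_heads  # 64
x = torch.randn(batch, seq, hidden)
x = x.view(batch, seq, num_heads, head_size).permute(0, 2, 1, 3)
print(x.shape)  # torch.Size([2, 12, 5, 64])
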
273 |
+
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Ernie
|
274 |
+
class ErnieSelfOutput(nn.Module):
|
275 |
+
def __init__(self, config):
|
276 |
+
super().__init__()
|
277 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
278 |
+
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
279 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
280 |
+
|
281 |
+
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
|
282 |
+
hidden_states = self.dense(hidden_states)
|
283 |
+
hidden_states = self.dropout(hidden_states)
|
284 |
+
hidden_states = self.LayerNorm(hidden_states + input_tensor)
|
285 |
+
return hidden_states
|
286 |
+
|
287 |
+
|
288 |
+
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Ernie
|
289 |
+
class ErnieAttention(nn.Module):
|
290 |
+
def __init__(self, config, position_embedding_type=None):
|
291 |
+
super().__init__()
|
292 |
+
self.self = ErnieSelfAttention(config, position_embedding_type=position_embedding_type)
|
293 |
+
self.output = ErnieSelfOutput(config)
|
294 |
+
self.pruned_heads = set()
|
295 |
+
|
296 |
+
def prune_heads(self, heads):
|
297 |
+
if len(heads) == 0:
|
298 |
+
return
|
299 |
+
heads, index = find_pruneable_heads_and_indices(
|
300 |
+
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
|
301 |
+
)
|
302 |
+
|
303 |
+
# Prune linear layers
|
304 |
+
self.self.query = prune_linear_layer(self.self.query, index)
|
305 |
+
self.self.key = prune_linear_layer(self.self.key, index)
|
306 |
+
self.self.value = prune_linear_layer(self.self.value, index)
|
307 |
+
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
|
308 |
+
|
309 |
+
# Update hyper params and store pruned heads
|
310 |
+
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
|
311 |
+
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
|
312 |
+
self.pruned_heads = self.pruned_heads.union(heads)
|
313 |
+
|
314 |
+
def forward(
|
315 |
+
self,
|
316 |
+
hidden_states: torch.Tensor,
|
317 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
318 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
319 |
+
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
320 |
+
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
321 |
+
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
322 |
+
output_attentions: Optional[bool] = False,
|
323 |
+
) -> Tuple[torch.Tensor]:
|
324 |
+
self_outputs = self.self(
|
325 |
+
hidden_states,
|
326 |
+
attention_mask,
|
327 |
+
head_mask,
|
328 |
+
encoder_hidden_states,
|
329 |
+
encoder_attention_mask,
|
330 |
+
past_key_value,
|
331 |
+
output_attentions,
|
332 |
+
)
|
333 |
+
attention_output = self.output(self_outputs[0], hidden_states)
|
334 |
+
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
|
335 |
+
return outputs
|
336 |
+
|
337 |
+
|
338 |
+
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Ernie
|
339 |
+
class ErnieIntermediate(nn.Module):
|
340 |
+
def __init__(self, config):
|
341 |
+
super().__init__()
|
342 |
+
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
|
343 |
+
if isinstance(config.hidden_act, str):
|
344 |
+
self.intermediate_act_fn = ACT2FN[config.hidden_act]
|
345 |
+
else:
|
346 |
+
self.intermediate_act_fn = config.hidden_act
|
347 |
+
|
348 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
349 |
+
hidden_states = self.dense(hidden_states)
|
350 |
+
hidden_states = self.intermediate_act_fn(hidden_states)
|
351 |
+
return hidden_states
|
352 |
+
|
353 |
+
|
354 |
+
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Ernie
|
355 |
+
class ErnieOutput(nn.Module):
|
356 |
+
def __init__(self, config):
|
357 |
+
super().__init__()
|
358 |
+
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
|
359 |
+
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
360 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
361 |
+
|
362 |
+
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
|
363 |
+
hidden_states = self.dense(hidden_states)
|
364 |
+
hidden_states = self.dropout(hidden_states)
|
365 |
+
hidden_states = self.LayerNorm(hidden_states + input_tensor)
|
366 |
+
return hidden_states
|
367 |
+
|
368 |
+
|
369 |
+
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Ernie
|
370 |
+
class ErnieLayer(nn.Module):
|
371 |
+
def __init__(self, config):
|
372 |
+
super().__init__()
|
373 |
+
self.chunk_size_feed_forward = config.chunk_size_feed_forward
|
374 |
+
self.seq_len_dim = 1
|
375 |
+
self.attention = ErnieAttention(config)
|
376 |
+
self.is_decoder = config.is_decoder
|
377 |
+
self.add_cross_attention = config.add_cross_attention
|
378 |
+
if self.add_cross_attention:
|
379 |
+
if not self.is_decoder:
|
380 |
+
raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
|
381 |
+
self.crossattention = ErnieAttention(config, position_embedding_type="absolute")
|
382 |
+
self.intermediate = ErnieIntermediate(config)
|
383 |
+
self.output = ErnieOutput(config)
|
384 |
+
|
385 |
+
def forward(
|
386 |
+
self,
|
387 |
+
hidden_states: torch.Tensor,
|
388 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
389 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
390 |
+
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
391 |
+
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
392 |
+
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
393 |
+
output_attentions: Optional[bool] = False,
|
394 |
+
) -> Tuple[torch.Tensor]:
|
395 |
+
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
|
396 |
+
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
|
397 |
+
self_attention_outputs = self.attention(
|
398 |
+
hidden_states,
|
399 |
+
attention_mask,
|
400 |
+
head_mask,
|
401 |
+
output_attentions=output_attentions,
|
402 |
+
past_key_value=self_attn_past_key_value,
|
403 |
+
)
|
404 |
+
attention_output = self_attention_outputs[0]
|
405 |
+
|
406 |
+
# if decoder, the last output is tuple of self-attn cache
|
407 |
+
if self.is_decoder:
|
408 |
+
outputs = self_attention_outputs[1:-1]
|
409 |
+
present_key_value = self_attention_outputs[-1]
|
410 |
+
else:
|
411 |
+
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
|
412 |
+
|
413 |
+
cross_attn_present_key_value = None
|
414 |
+
if self.is_decoder and encoder_hidden_states is not None:
|
415 |
+
if not hasattr(self, "crossattention"):
|
416 |
+
raise ValueError(
|
417 |
+
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
|
418 |
+
" by setting `config.add_cross_attention=True`"
|
419 |
+
)
|
420 |
+
|
421 |
+
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
|
422 |
+
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
|
423 |
+
cross_attention_outputs = self.crossattention(
|
424 |
+
attention_output,
|
425 |
+
attention_mask,
|
426 |
+
head_mask,
|
427 |
+
encoder_hidden_states,
|
428 |
+
encoder_attention_mask,
|
429 |
+
cross_attn_past_key_value,
|
430 |
+
output_attentions,
|
431 |
+
)
|
432 |
+
attention_output = cross_attention_outputs[0]
|
433 |
+
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
|
434 |
+
|
435 |
+
# add cross-attn cache to positions 3,4 of present_key_value tuple
|
436 |
+
cross_attn_present_key_value = cross_attention_outputs[-1]
|
437 |
+
present_key_value = present_key_value + cross_attn_present_key_value
|
438 |
+
|
439 |
+
layer_output = apply_chunking_to_forward(
|
440 |
+
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
|
441 |
+
)
|
442 |
+
outputs = (layer_output,) + outputs
|
443 |
+
|
444 |
+
# if decoder, return the attn key/values as the last output
|
445 |
+
if self.is_decoder:
|
446 |
+
outputs = outputs + (present_key_value,)
|
447 |
+
|
448 |
+
return outputs
|
449 |
+
|
450 |
+
def feed_forward_chunk(self, attention_output):
|
451 |
+
intermediate_output = self.intermediate(attention_output)
|
452 |
+
layer_output = self.output(intermediate_output, attention_output)
|
453 |
+
return layer_output
|
454 |
+
|
455 |
+
|
456 |
+
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Ernie
|
457 |
+
class ErnieEncoder(nn.Module):
|
458 |
+
def __init__(self, config):
|
459 |
+
super().__init__()
|
460 |
+
self.config = config
|
461 |
+
self.layer = nn.ModuleList([ErnieLayer(config) for _ in range(config.num_hidden_layers)])
|
462 |
+
self.gradient_checkpointing = False
|
463 |
+
|
464 |
+
def forward(
|
465 |
+
self,
|
466 |
+
hidden_states: torch.Tensor,
|
467 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
468 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
469 |
+
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
470 |
+
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
471 |
+
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
472 |
+
use_cache: Optional[bool] = None,
|
473 |
+
output_attentions: Optional[bool] = False,
|
474 |
+
output_hidden_states: Optional[bool] = False,
|
475 |
+
return_dict: Optional[bool] = True,
|
476 |
+
) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
|
477 |
+
all_hidden_states = () if output_hidden_states else None
|
478 |
+
all_self_attentions = () if output_attentions else None
|
479 |
+
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
|
480 |
+
|
481 |
+
if self.gradient_checkpointing and self.training:
|
482 |
+
if use_cache:
|
483 |
+
logger.warning_once(
|
484 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
485 |
+
)
|
486 |
+
use_cache = False
|
487 |
+
|
488 |
+
next_decoder_cache = () if use_cache else None
|
489 |
+
for i, layer_module in enumerate(self.layer):
|
490 |
+
if output_hidden_states:
|
491 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
492 |
+
|
493 |
+
layer_head_mask = head_mask[i] if head_mask is not None else None
|
494 |
+
past_key_value = past_key_values[i] if past_key_values is not None else None
|
495 |
+
|
496 |
+
if self.gradient_checkpointing and self.training:
|
497 |
+
layer_outputs = self._gradient_checkpointing_func(
|
498 |
+
layer_module.__call__,
|
499 |
+
hidden_states,
|
500 |
+
attention_mask,
|
501 |
+
layer_head_mask,
|
502 |
+
encoder_hidden_states,
|
503 |
+
encoder_attention_mask,
|
504 |
+
past_key_value,
|
505 |
+
output_attentions,
|
506 |
+
)
|
507 |
+
else:
|
508 |
+
layer_outputs = layer_module(
|
509 |
+
hidden_states,
|
510 |
+
attention_mask,
|
511 |
+
layer_head_mask,
|
512 |
+
encoder_hidden_states,
|
513 |
+
encoder_attention_mask,
|
514 |
+
past_key_value,
|
515 |
+
output_attentions,
|
516 |
+
)
|
517 |
+
|
518 |
+
hidden_states = layer_outputs[0]
|
519 |
+
if use_cache:
|
520 |
+
next_decoder_cache += (layer_outputs[-1],)
|
521 |
+
if output_attentions:
|
522 |
+
all_self_attentions = all_self_attentions + (layer_outputs[1],)
|
523 |
+
if self.config.add_cross_attention:
|
524 |
+
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
|
525 |
+
|
526 |
+
if output_hidden_states:
|
527 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
528 |
+
|
529 |
+
if not return_dict:
|
530 |
+
return tuple(
|
531 |
+
v
|
532 |
+
for v in [
|
533 |
+
hidden_states,
|
534 |
+
next_decoder_cache,
|
535 |
+
all_hidden_states,
|
536 |
+
all_self_attentions,
|
537 |
+
all_cross_attentions,
|
538 |
+
]
|
539 |
+
if v is not None
|
540 |
+
)
|
541 |
+
return BaseModelOutputWithPastAndCrossAttentions(
|
542 |
+
last_hidden_state=hidden_states,
|
543 |
+
past_key_values=next_decoder_cache,
|
544 |
+
hidden_states=all_hidden_states,
|
545 |
+
attentions=all_self_attentions,
|
546 |
+
cross_attentions=all_cross_attentions,
|
547 |
+
)
|
548 |
+
|
549 |
+
|
550 |
+
# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Ernie
|
551 |
+
class ErniePooler(nn.Module):
|
552 |
+
def __init__(self, config):
|
553 |
+
super().__init__()
|
554 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
555 |
+
self.activation = nn.Tanh()
|
556 |
+
|
557 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
558 |
+
# We "pool" the model by simply taking the hidden state corresponding
|
559 |
+
# to the first token.
|
560 |
+
first_token_tensor = hidden_states[:, 0]
|
561 |
+
pooled_output = self.dense(first_token_tensor)
|
562 |
+
pooled_output = self.activation(pooled_output)
|
563 |
+
return pooled_output
|
564 |
+
|
565 |
+
|
566 |
+
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->Ernie
|
567 |
+
class ErniePredictionHeadTransform(nn.Module):
|
568 |
+
def __init__(self, config):
|
569 |
+
super().__init__()
|
570 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
571 |
+
if isinstance(config.hidden_act, str):
|
572 |
+
self.transform_act_fn = ACT2FN[config.hidden_act]
|
573 |
+
else:
|
574 |
+
self.transform_act_fn = config.hidden_act
|
575 |
+
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
576 |
+
|
577 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
578 |
+
hidden_states = self.dense(hidden_states)
|
579 |
+
hidden_states = self.transform_act_fn(hidden_states)
|
580 |
+
hidden_states = self.LayerNorm(hidden_states)
|
581 |
+
return hidden_states
|
582 |
+
|
583 |
+
|
584 |
+
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Ernie
|
585 |
+
class ErnieLMPredictionHead(nn.Module):
|
586 |
+
def __init__(self, config):
|
587 |
+
super().__init__()
|
588 |
+
self.transform = ErniePredictionHeadTransform(config)
|
589 |
+
|
590 |
+
# The output weights are the same as the input embeddings, but there is
|
591 |
+
# an output-only bias for each token.
|
592 |
+
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
593 |
+
|
594 |
+
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
|
595 |
+
|
596 |
+
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
|
597 |
+
self.decoder.bias = self.bias
|
598 |
+
|
599 |
+
def forward(self, hidden_states):
|
600 |
+
hidden_states = self.transform(hidden_states)
|
601 |
+
hidden_states = self.decoder(hidden_states)
|
602 |
+
return hidden_states
|
603 |
+
|
604 |
+
|
605 |
+
# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Ernie
|
606 |
+
class ErnieOnlyMLMHead(nn.Module):
|
607 |
+
def __init__(self, config):
|
608 |
+
super().__init__()
|
609 |
+
self.predictions = ErnieLMPredictionHead(config)
|
610 |
+
|
611 |
+
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
|
612 |
+
prediction_scores = self.predictions(sequence_output)
|
613 |
+
return prediction_scores
|
614 |
+
|
615 |
+
|
616 |
+
# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->Ernie
|
617 |
+
class ErnieOnlyNSPHead(nn.Module):
|
618 |
+
def __init__(self, config):
|
619 |
+
super().__init__()
|
620 |
+
self.seq_relationship = nn.Linear(config.hidden_size, 2)
|
621 |
+
|
622 |
+
def forward(self, pooled_output):
|
623 |
+
seq_relationship_score = self.seq_relationship(pooled_output)
|
624 |
+
return seq_relationship_score
|
625 |
+
|
626 |
+
|
627 |
+
# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->Ernie
|
628 |
+
class ErniePreTrainingHeads(nn.Module):
|
629 |
+
def __init__(self, config):
|
630 |
+
super().__init__()
|
631 |
+
self.predictions = ErnieLMPredictionHead(config)
|
632 |
+
self.seq_relationship = nn.Linear(config.hidden_size, 2)
|
633 |
+
|
634 |
+
def forward(self, sequence_output, pooled_output):
|
635 |
+
prediction_scores = self.predictions(sequence_output)
|
636 |
+
seq_relationship_score = self.seq_relationship(pooled_output)
|
637 |
+
return prediction_scores, seq_relationship_score
|
638 |
+
|
639 |
+
|
640 |
+
class ErniePreTrainedModel(PreTrainedModel):
|
641 |
+
"""
|
642 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
643 |
+
models.
|
644 |
+
"""
|
645 |
+
|
646 |
+
config_class = ErnieConfig
|
647 |
+
base_model_prefix = "ernie"
|
648 |
+
supports_gradient_checkpointing = True
|
649 |
+
|
650 |
+
def _init_weights(self, module):
|
651 |
+
"""Initialize the weights"""
|
652 |
+
if isinstance(module, nn.Linear):
|
653 |
+
# Slightly different from the TF version which uses truncated_normal for initialization
|
654 |
+
# cf https://github.com/pytorch/pytorch/pull/5617
|
655 |
+
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
656 |
+
if module.bias is not None:
|
657 |
+
module.bias.data.zero_()
|
658 |
+
elif isinstance(module, nn.Embedding):
|
659 |
+
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
660 |
+
if module.padding_idx is not None:
|
661 |
+
module.weight.data[module.padding_idx].zero_()
|
662 |
+
elif isinstance(module, nn.LayerNorm):
|
663 |
+
module.bias.data.zero_()
|
664 |
+
module.weight.data.fill_(1.0)
|
665 |
+
|
666 |
+
|
667 |
+
@dataclass
|
668 |
+
# Copied from transformers.models.bert.modeling_bert.BertForPreTrainingOutput with Bert->Ernie
|
669 |
+
class ErnieForPreTrainingOutput(ModelOutput):
|
670 |
+
"""
|
671 |
+
Output type of [`ErnieForPreTraining`].
|
672 |
+
|
673 |
+
Args:
|
674 |
+
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
|
675 |
+
Total loss as the sum of the masked language modeling loss and the next sequence prediction
|
676 |
+
(classification) loss.
|
677 |
+
prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
|
678 |
+
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
|
679 |
+
seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
|
680 |
+
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
|
681 |
+
before SoftMax).
|
682 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
683 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
|
684 |
+
shape `(batch_size, sequence_length, hidden_size)`.
|
685 |
+
|
686 |
+
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
|
687 |
+
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
|
688 |
+
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
|
689 |
+
sequence_length)`.
|
690 |
+
|
691 |
+
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
|
692 |
+
heads.
|
693 |
+
"""
|
694 |
+
|
695 |
+
loss: Optional[torch.FloatTensor] = None
|
696 |
+
prediction_logits: torch.FloatTensor = None
|
697 |
+
seq_relationship_logits: torch.FloatTensor = None
|
698 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
699 |
+
attentions: Optional[Tuple[torch.FloatTensor]] = None
|
700 |
+
|
701 |
+
|
702 |
+
ERNIE_START_DOCSTRING = r"""
|
703 |
+
|
704 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
705 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
706 |
+
etc.)
|
707 |
+
|
708 |
+
    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`ErnieConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

ERNIE_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        task_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Task type embedding is a special embedding to represent the characteristic of different tasks, such as
            word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
            assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
            config.task_type_vocab_size - 1]`.
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
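

# Illustrative usage sketch for the `task_type_ids` input described in the docstring
# above. The checkpoint name is an assumption; any ERNIE checkpoint whose config
# defines `task_type_vocab_size` behaves the same way.
def _example_task_type_ids_usage():
    import torch
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
    model = ErnieModel.from_pretrained("nghuyong/ernie-1.0-base-zh")
    inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
    # Assign every token to task 0; valid ids are [0, config.task_type_vocab_size - 1].
    task_type_ids = torch.zeros_like(inputs["input_ids"])
    return model(**inputs, task_type_ids=task_type_ids)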


@add_start_docstrings(
    "The bare Ernie Model transformer outputting raw hidden-states without any specific head on top.",
    ERNIE_START_DOCSTRING,
)
class ErnieModel(ErniePreTrainedModel):
    """

    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in [Attention is
    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
    """

    # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Ernie
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = ErnieEmbeddings(config)
        self.encoder = ErnieEncoder(config)

        self.pooler = ErniePooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    # Copied from transformers.models.bert.modeling_bert.BertModel.get_input_embeddings
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    # Copied from transformers.models.bert.modeling_bert.BertModel.set_input_embeddings
    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    # Copied from transformers.models.bert.modeling_bert.BertModel._prune_heads
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        task_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        r"""
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)

        if token_type_ids is None:
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
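

# Illustrative sketch of the decoder configuration described in the ErnieModel
# docstring: `is_decoder` enables causal masking and, together with
# `add_cross_attention`, makes the forward pass accept `encoder_hidden_states`.
# The model below is randomly initialized; this only demonstrates the wiring.
def _example_ernie_decoder_usage():
    import torch
    from transformers import ErnieConfig

    config = ErnieConfig(is_decoder=True, add_cross_attention=True)
    decoder = ErnieModel(config)
    input_ids = torch.tensor([[101, 102]])
    # Cross-attention consumes encoder states of shape (batch, src_len, hidden_size).
    encoder_hidden_states = torch.randn(1, 4, config.hidden_size)
    return decoder(input_ids=input_ids, encoder_hidden_states=encoder_hidden_states)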


@add_start_docstrings(
    """
    Ernie Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
    sentence prediction (classification)` head.
    """,
    ERNIE_START_DOCSTRING,
)
class ErnieForPreTraining(ErniePreTrainedModel):
    _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]

    # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.__init__ with Bert->Ernie,bert->ernie
    def __init__(self, config):
        super().__init__(config)

        self.ernie = ErnieModel(config)
        self.cls = ErniePreTrainingHeads(config)

        # Initialize weights and apply final processing
        self.post_init()

    # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.get_output_embeddings
    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.set_output_embeddings
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=ErnieForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        task_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        next_sentence_label: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], ErnieForPreTrainingOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked),
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
            pair (see `input_ids` docstring). Indices should be in `[0, 1]`:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
            Used to hide legacy arguments that have been deprecated.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, ErnieForPreTraining
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
        >>> model = ErnieForPreTraining.from_pretrained("nghuyong/ernie-1.0-base-zh")

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> prediction_logits = outputs.prediction_logits
        >>> seq_relationship_logits = outputs.seq_relationship_logits
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        total_loss = None
        if labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss

        if not return_dict:
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return ErnieForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
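

# Illustrative sketch of the two pretraining objectives combined above: the total
# loss is the masked-LM cross-entropy plus the next-sentence cross-entropy. The
# checkpoint name is an assumption; labels here simply copy the inputs for brevity.
def _example_pretraining_loss():
    import torch
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
    model = ErnieForPreTraining.from_pretrained("nghuyong/ernie-1.0-base-zh")
    inputs = tokenizer("Hello, my dog is cute", "It likes to play", return_tensors="pt")
    labels = inputs["input_ids"].clone()  # MLM targets; -100 entries would be ignored
    next_sentence_label = torch.tensor([0])  # 0 = sentence B really follows sentence A
    outputs = model(**inputs, labels=labels, next_sentence_label=next_sentence_label)
    return outputs.loss  # masked_lm_loss + next_sentence_loss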


@add_start_docstrings(
    """Ernie Model with a `language modeling` head on top for CLM fine-tuning.""", ERNIE_START_DOCSTRING
)
class ErnieForCausalLM(ErniePreTrainedModel):
    _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]

    # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.__init__ with BertLMHeadModel->ErnieForCausalLM,Bert->Ernie,bert->ernie
    def __init__(self, config):
        super().__init__(config)

        if not config.is_decoder:
            logger.warning("If you want to use `ErnieForCausalLM` as a standalone, add `is_decoder=True`.")

        self.ernie = ErnieModel(config, add_pooling_layer=False)
        self.cls = ErnieOnlyMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.get_output_embeddings
    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.set_output_embeddings
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutputWithCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        task_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.Tensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
        r"""
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            use_cache = False

        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.prepare_inputs_for_generation
    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, use_cache=True, **model_kwargs
    ):
        input_shape = input_ids.shape
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        # cut decoder_input_ids if past_key_values is used
        if past_key_values is not None:
            past_length = past_key_values[0][0].shape[2]

            # Some generation methods already pass only the last input ID
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                # Default to old behavior: keep only final ID
                remove_prefix_length = input_ids.shape[1] - 1

            input_ids = input_ids[:, remove_prefix_length:]

        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "past_key_values": past_key_values,
            "use_cache": use_cache,
        }

    # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel._reorder_cache
    def _reorder_cache(self, past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past
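

# Illustrative generation sketch: `generate` calls `prepare_inputs_for_generation`
# above, which trims `input_ids` to the suffix not yet covered by `past_key_values`
# when `use_cache=True`. The checkpoint name is an assumption, and using an encoder
# checkpoint causally like this is only meaningful after CLM fine-tuning.
def _example_causal_generation():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
    model = ErnieForCausalLM.from_pretrained("nghuyong/ernie-1.0-base-zh", is_decoder=True)
    input_ids = tokenizer("Hello", return_tensors="pt").input_ids
    return model.generate(input_ids, max_new_tokens=5, use_cache=True)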


@add_start_docstrings("""Ernie Model with a `language modeling` head on top.""", ERNIE_START_DOCSTRING)
class ErnieForMaskedLM(ErniePreTrainedModel):
    _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]

    # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.__init__ with Bert->Ernie,bert->ernie
    def __init__(self, config):
        super().__init__(config)

        if config.is_decoder:
            logger.warning(
                "If you want to use `ErnieForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )

        self.ernie = ErnieModel(config, add_pooling_layer=False)
        self.cls = ErnieOnlyMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.get_output_embeddings
    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.set_output_embeddings
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output="'paris'",
        expected_loss=0.88,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        task_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.prepare_inputs_for_generation
    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        input_shape = input_ids.shape
        effective_batch_size = input_shape[0]

        # add a dummy token
        if self.config.pad_token_id is None:
            raise ValueError("The PAD token should be defined for generation")

        attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        dummy_token = torch.full(
            (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
        )
        input_ids = torch.cat([input_ids, dummy_token], dim=1)

        return {"input_ids": input_ids, "attention_mask": attention_mask}
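

# Illustrative sketch of the -100 ignore-index convention used by the masked-LM loss
# above: only positions whose label is not -100 contribute. Checkpoint name assumed.
def _example_masked_lm_loss():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
    model = ErnieForMaskedLM.from_pretrained("nghuyong/ernie-1.0-base-zh")
    inputs = tokenizer("Paris is the capital of France.", return_tensors="pt")
    labels = inputs["input_ids"].clone()
    inputs["input_ids"][0, 1] = tokenizer.mask_token_id  # mask one token
    labels[inputs["input_ids"] != tokenizer.mask_token_id] = -100  # score masked slots only
    return model(**inputs, labels=labels).loss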


@add_start_docstrings(
    """Ernie Model with a `next sentence prediction (classification)` head on top.""",
    ERNIE_START_DOCSTRING,
)
class ErnieForNextSentencePrediction(ErniePreTrainedModel):
    # Copied from transformers.models.bert.modeling_bert.BertForNextSentencePrediction.__init__ with Bert->Ernie,bert->ernie
    def __init__(self, config):
        super().__init__(config)

        self.ernie = ErnieModel(config)
        self.cls = ErnieOnlyNSPHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        task_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring). Indices should be in `[0, 1]`:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, ErnieForNextSentencePrediction
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
        >>> model = ErnieForNextSentencePrediction.from_pretrained("nghuyong/ernie-1.0-base-zh")

        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
        >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")

        >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
        >>> logits = outputs.logits
        >>> assert logits[0, 0] < logits[0, 1]  # next sentence was random
        ```
        """

        if "next_sentence_label" in kwargs:
            warnings.warn(
                "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
                " `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("next_sentence_label")

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        seq_relationship_scores = self.cls(pooled_output)

        next_sentence_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))

        if not return_dict:
            output = (seq_relationship_scores,) + outputs[2:]
            return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output

        return NextSentencePredictorOutput(
            loss=next_sentence_loss,
            logits=seq_relationship_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    Ernie Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
    output) e.g. for GLUE tasks.
    """,
    ERNIE_START_DOCSTRING,
)
class ErnieForSequenceClassification(ErniePreTrainedModel):
    # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification.__init__ with Bert->Ernie,bert->ernie
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.ernie = ErnieModel(config)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        task_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
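

# Illustrative sketch of the `problem_type` dispatch above: integer labels select
# single-label classification (cross-entropy), and with `problem_type` set to
# multi-label, float multi-hot labels are scored with BCEWithLogitsLoss. The model
# is randomly initialized; only the loss routing is demonstrated.
def _example_problem_types():
    import torch
    from transformers import ErnieConfig

    model = ErnieForSequenceClassification(ErnieConfig(num_labels=3))
    input_ids = torch.tensor([[101, 102]])
    loss_single = model(input_ids, labels=torch.tensor([1])).loss  # CrossEntropyLoss
    model.config.problem_type = "multi_label_classification"
    loss_multi = model(input_ids, labels=torch.tensor([[1.0, 0.0, 1.0]])).loss  # BCE
    return loss_single, loss_multi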


@add_start_docstrings(
    """
    Ernie Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    ERNIE_START_DOCSTRING,
)
class ErnieForMultipleChoice(ErniePreTrainedModel):
    # Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice.__init__ with Bert->Ernie,bert->ernie
    def __init__(self, config):
        super().__init__(config)

        self.ernie = ErnieModel(config)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        task_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
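

# Illustrative sketch of the multiple-choice reshaping above: inputs arrive as
# (batch_size, num_choices, seq_len), are flattened for the encoder, and the
# per-choice scores are reshaped back to (batch_size, num_choices). Random weights.
def _example_multiple_choice_shapes():
    import torch
    from transformers import ErnieConfig

    config = ErnieConfig()
    model = ErnieForMultipleChoice(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 4, 7))  # 2 examples, 4 choices
    labels = torch.tensor([0, 3])  # index of the correct choice per example
    outputs = model(input_ids=input_ids, labels=labels)
    assert outputs.logits.shape == (2, 4)
    return outputs.loss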


@add_start_docstrings(
    """
    Ernie Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    ERNIE_START_DOCSTRING,
)
class ErnieForTokenClassification(ErniePreTrainedModel):
    # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification.__init__ with Bert->Ernie,bert->ernie
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.ernie = ErnieModel(config, add_pooling_layer=False)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        task_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    Ernie Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    ERNIE_START_DOCSTRING,
)
class ErnieForQuestionAnswering(ErniePreTrainedModel):
    # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering.__init__ with Bert->Ernie,bert->ernie
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.ernie = ErnieModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        task_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.ernie(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            task_type_ids=task_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting adds a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
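

# Illustrative sketch of the span head above: one linear projection is split into
# start/end logits, and out-of-range gold positions are clamped to an index the
# loss ignores. Random weights; the shapes are the point here.
def _example_question_answering():
    import torch
    from transformers import ErnieConfig

    config = ErnieConfig(num_labels=2)  # two logits per token: span start and span end
    model = ErnieForQuestionAnswering(config)
    input_ids = torch.randint(0, config.vocab_size, (1, 10))
    outputs = model(
        input_ids=input_ids,
        start_positions=torch.tensor([3]),
        end_positions=torch.tensor([5]),
    )
    assert outputs.start_logits.shape == (1, 10)
    return outputs.loss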
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py
ADDED
@@ -0,0 +1,144 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2023 The BigCode team and HuggingFace Inc. team.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
""" GPTBigCode configuration"""
|
16 |
+
|
17 |
+
from ...configuration_utils import PretrainedConfig
|
18 |
+
from ...utils import logging
|
19 |
+
|
20 |
+
|
21 |
+
logger = logging.get_logger(__name__)
|
22 |
+
|
23 |
+
|
24 |
+
from ..deprecated._archive_maps import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
|
25 |
+
|
26 |
+
|
27 |
+
class GPTBigCodeConfig(PretrainedConfig):
|
28 |
+
"""
|
29 |
+
This is the configuration class to store the configuration of a [`GPTBigCodeModel`]. It is used to instantiate a
|
30 |
+
GPTBigCode model according to the specified arguments, defining the model architecture. Instantiating a
|
31 |
+
configuration with the defaults will yield a similar configuration to that of the GPTBigCode
|
32 |
+
[gpt_bigcode](https://huggingface.co/gpt_bigcode) architecture.
|
33 |
+
|
34 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
35 |
+
documentation from [`PretrainedConfig`] for more information.
|
36 |
+
|
37 |
+
|
38 |
+
Args:
|
39 |
+
vocab_size (`int`, *optional*, defaults to 50257):
|
40 |
+
Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
|
41 |
+
`inputs_ids` passed when calling [`GPTBigCodeModel`].
|
42 |
+
n_positions (`int`, *optional*, defaults to 1024):
|
43 |
+
The maximum sequence length that this model might ever be used with. Typically set this to something large
|
44 |
+
just in case (e.g., 512 or 1024 or 2048).
|
45 |
+
n_embd (`int`, *optional*, defaults to 768):
|
46 |
+
Dimensionality of the embeddings and hidden states.
|
47 |
+
n_layer (`int`, *optional*, defaults to 12):
|
48 |
+
Number of hidden layers in the Transformer encoder.
|
49 |
+
n_head (`int`, *optional*, defaults to 12):
|
50 |
+
Number of attention heads for each attention layer in the Transformer encoder.
|
51 |
+
n_inner (`int`, *optional*, defaults to None):
|
52 |
+
Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd
|
53 |
+
activation_function (`str`, *optional*, defaults to `"gelu_pytorch_tanh"`):
|
54 |
+
Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new",
|
55 |
+
"gelu_pytorch_tanh"]`.
|
56 |
+
resid_pdrop (`float`, *optional*, defaults to 0.1):
|
57 |
+
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
|
58 |
+
embd_pdrop (`float`, *optional*, defaults to 0.1):
|
59 |
+
The dropout ratio for the embeddings.
|
60 |
+
attn_pdrop (`float`, *optional*, defaults to 0.1):
|
61 |
+
The dropout ratio for the attention.
|
62 |
+
layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
|
63 |
+
The epsilon to use in the layer normalization layers.
|
64 |
+
initializer_range (`float`, *optional*, defaults to 0.02):
|
65 |
+
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
66 |
+
scale_attn_weights (`bool`, *optional*, defaults to `True`):
|
67 |
+
Scale attention weights by dividing by sqrt(hidden_size)..
|
68 |
+
use_cache (`bool`, *optional*, defaults to `True`):
|
69 |
+
Whether or not the model should return the last key/values attentions (not used by all models).
|
70 |
+
attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`):
|
71 |
+
Whether to call the fused softmax in float32.
|
72 |
+
scale_attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`):
|
73 |
+
Whether to scale the attention softmax in float32.
|
74 |
+
attention_type (`bool`, *optional*, defaults to `True`):
|
75 |
+
Whether to use Multi-Query Attion (`True`) or Multi-Head Attention (`False`).
|
76 |
+
    Example:

    ```python
    >>> from transformers import GPTBigCodeConfig, GPTBigCodeModel

    >>> # Initializing a GPTBigCode configuration
    >>> configuration = GPTBigCodeConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = GPTBigCodeModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
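Not part of the original file: a minimal usage sketch of the configuration above, assuming a recent `transformers` install. It shows how the `attribute_map` exposes the GPT-2 style field names under the generic aliases used elsewhere in the library.

```python
>>> from transformers import GPTBigCodeConfig

>>> # attribute_map aliases: hidden_size <-> n_embd, num_hidden_layers <-> n_layer, ...
>>> config = GPTBigCodeConfig(n_embd=1024, n_layer=24, n_head=16, multi_query=True)
>>> config.hidden_size == config.n_embd
True
>>> (config.num_hidden_layers, config.num_attention_heads)
(24, 16)
>>> # With multi_query=True the attention layers keep a single key/value head (see the modeling code below).
>>> config.multi_query
True
```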
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py
ADDED
@@ -0,0 +1,1504 @@
# coding=utf-8
# Copyright 2023 The Bigcode team and HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch GPTBigCode model."""
import math
from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import is_torch_greater_or_equal_than_2_2
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
    logging,
)
from .configuration_gpt_bigcode import GPTBigCodeConfig


if is_flash_attn_2_available():
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "bigcode/gpt_bigcode-santacoder"
_CONFIG_FOR_DOC = "GPTBigCodeConfig"


from ..deprecated._archive_maps import GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


# Fused kernels
# Use separate functions for each case because conditionals prevent kernel fusion.
# TODO: Could have better fused kernels depending on scaling, dropout and head mask.
#  Is it doable without writing 32 functions?
@torch.jit.script
def upcast_masked_softmax(
    x: torch.Tensor, mask: torch.Tensor, mask_value: torch.Tensor, scale: float, softmax_dtype: torch.dtype
):
    input_dtype = x.dtype
    x = x.to(softmax_dtype) * scale
    x = torch.where(mask, x, mask_value)
    x = torch.nn.functional.softmax(x, dim=-1).to(input_dtype)
    return x


@torch.jit.script
def upcast_softmax(x: torch.Tensor, scale: float, softmax_dtype: torch.dtype):
    input_dtype = x.dtype
    x = x.to(softmax_dtype) * scale
    x = torch.nn.functional.softmax(x, dim=-1).to(input_dtype)
    return x


@torch.jit.script
def masked_softmax(x: torch.Tensor, mask: torch.Tensor, mask_value: torch.Tensor):
    x = torch.where(mask, x, mask_value)
    x = torch.nn.functional.softmax(x, dim=-1)
    return x
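A standalone sketch (not from the file) of what the fused upcast kernel above computes: the softmax runs in float32 and only the result is cast back to the input dtype. Only `torch` is assumed.

```python
import torch

# Re-statement of upcast_softmax for illustration (mirrors the kernel above, without torch.jit.script).
def upcast_softmax_ref(x, scale, softmax_dtype):
    input_dtype = x.dtype
    x = x.to(softmax_dtype) * scale
    return torch.nn.functional.softmax(x, dim=-1).to(input_dtype)

scores = torch.randn(2, 4, 8, dtype=torch.float16)
out = upcast_softmax_ref(scores, scale=1.0, softmax_dtype=torch.float32)
# The softmax itself is computed in float32; only the result is cast back to float16.
ref = torch.nn.functional.softmax(scores.float(), dim=-1).to(torch.float16)
assert torch.equal(out, ref)
```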
# Copied from transformers.models.llama.modeling_llama._get_unpad_data
def _get_unpad_data(attention_mask):
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )
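A toy illustration (hypothetical mask values, not from the file) of the quantities `_get_unpad_data` derives for `flash_attn_varlen_func`: flattened token indices, cumulative sequence lengths and the longest sequence in the batch.

```python
import torch
import torch.nn.functional as F

# Hypothetical padding mask: two sequences of lengths 3 and 2, padded to length 4.
attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]], dtype=torch.int32)

seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)             # tensor([3, 2])
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()  # tensor([0, 1, 2, 4, 5])
max_seqlen_in_batch = seqlens_in_batch.max().item()                          # 3
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
print(cu_seqlens)  # tensor([0, 3, 5]) -> cumulative sequence boundaries for the varlen flash-attention call
```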
class GPTBigCodeAttention(nn.Module):
    def __init__(self, config, is_cross_attention=False, layer_idx=None):
        super().__init__()
        self.config = config

        self.mask_value = None
        self.multi_query = config.multi_query
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        self.kv_heads = 1 if self.multi_query else self.num_heads
        self.kv_dim = self.kv_heads * self.head_dim
        self.split_size = self.embed_dim
        self.is_causal = True

        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )

        self.scale_attn_weights = config.scale_attn_weights
        self.is_cross_attention = is_cross_attention

        self.layer_idx = layer_idx
        self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = (
            config.scale_attention_softmax_in_fp32 and config.attention_softmax_in_fp32
        )
        self.attn_pdrop = config.attn_pdrop

        if self.is_cross_attention:
            if self.multi_query:
                raise NotImplementedError("Multi-Query Attention not supported for cross_attention")

            self.c_attn = nn.Linear(self.embed_dim, 2 * self.embed_dim)
            self.q_attn = nn.Linear(self.embed_dim, self.embed_dim)
        else:
            self.c_attn = nn.Linear(self.embed_dim, self.embed_dim + 2 * self.kv_dim)

        self.c_proj = nn.Linear(self.embed_dim, self.embed_dim)

        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

    def _get_mask_value(self, device, dtype):
        # torch.where expects a tensor. We use a cache to avoid recreating it every time.
        if self.mask_value is None or self.mask_value.dtype != dtype or self.mask_value.device != device:
            self.mask_value = torch.full([], torch.finfo(dtype).min, dtype=dtype, device=device)
        return self.mask_value

    def _attn(self, query, key, value, attention_mask=None, head_mask=None):
        dtype = query.dtype
        softmax_dtype = torch.float32 if self.attention_softmax_in_fp32 else dtype
        upcast = dtype != softmax_dtype

        unscale = self.layer_idx + 1 if self.scale_attention_softmax_in_fp32 and upcast else 1
        scale_factor = unscale**-1
        if self.scale_attn_weights:
            scale_factor /= self.head_dim**0.5

        # MQA models: (batch_size, query_length, num_heads * head_dim)
        # MHA models: (batch_size, num_heads, query_length, head_dim)
        query_shape = query.shape
        batch_size = query_shape[0]
        key_length = key.size(-1)
        if self.multi_query:
            # (batch_size, query_length, num_heads, head_dim) x (batch_size, head_dim, key_length)
            # -> (batch_size, query_length, num_heads, key_length)
            query_length = query_shape[1]
            attn_shape = (batch_size, query_length, self.num_heads, key_length)
            attn_view = (batch_size, query_length * self.num_heads, key_length)
            # No copy needed for MQA 2, or when layer_past is provided.
            query = query.reshape(batch_size, query_length * self.num_heads, self.head_dim)
        else:
            # (batch_size, num_heads, query_length, head_dim) x (batch_size, num_heads, head_dim, key_length)
            # -> (batch_size, num_heads, query_length, key_length)
            query_length = query_shape[2]
            attn_shape = (batch_size, self.num_heads, query_length, key_length)
            attn_view = (batch_size * self.num_heads, query_length, key_length)
            # Always copies
            query = query.reshape(batch_size * self.num_heads, query_length, self.head_dim)
            # No copy when layer_past is provided.
            key = key.reshape(batch_size * self.num_heads, self.head_dim, key_length)

        attn_weights = torch.empty(attn_view, device=query.device, dtype=query.dtype)
        if query.device.type == "cpu":
            # This is needed because of a bug in pytorch https://github.com/pytorch/pytorch/issues/80588.
            # The bug was fixed in https://github.com/pytorch/pytorch/pull/96086,
            # but the fix has not been released as of pytorch version 2.0.0.
            attn_weights = torch.zeros_like(attn_weights)
            beta = 1
        else:
            beta = 0
        attn_weights = torch.baddbmm(attn_weights, query, key, beta=beta, alpha=scale_factor).view(attn_shape)

        if upcast:
            # Use a fused kernel to prevent a large overhead from casting and scaling.
            # Sub-optimal when the key length is not a multiple of 8.
            if attention_mask is None:
                attn_weights = upcast_softmax(attn_weights, unscale, softmax_dtype)
            else:
                mask_value = self._get_mask_value(attn_weights.device, softmax_dtype)
                attn_weights = upcast_masked_softmax(attn_weights, attention_mask, mask_value, unscale, softmax_dtype)
        else:
            if attention_mask is not None:
                mask_value = self._get_mask_value(attn_weights.device, softmax_dtype)

                # The fused kernel is very slow when the key length is not a multiple of 8, so we skip fusion.
                attn_weights = torch.where(attention_mask, attn_weights, mask_value)

            attn_weights = torch.nn.functional.softmax(attn_weights, dim=-1)

        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            if self.multi_query:
                head_mask = head_mask.transpose(1, 2)
            attn_weights = attn_weights * head_mask

        if self.multi_query:
            attn_output = torch.bmm(attn_weights.view(attn_view), value).view(query_shape)
        else:
            attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights

    def forward(
        self,
        hidden_states: torch.Tensor,
        layer_past: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Union[
        Tuple[torch.Tensor, Optional[torch.Tensor]],
        Tuple[torch.Tensor, Optional[torch.Tensor], Tuple[torch.Tensor, ...]],
    ]:
        if encoder_hidden_states is not None:
            if not hasattr(self, "q_attn") or not self.is_cross_attention:
                raise ValueError(
                    "If class is used as cross attention, the weights `q_attn` have to be defined. "
                    "Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`."
                )

            query = self.q_attn(hidden_states)
            key_value = self.c_attn(encoder_hidden_states)
            attention_mask = encoder_attention_mask
        elif self.multi_query:
            query, key_value = self.c_attn(hidden_states).split((self.embed_dim, 2 * self.kv_dim), dim=2)
        else:
            # Note: We split as (self.num_heads, 3, self.head_dim) instead of (3, self.num_heads, self.head_dim),
            # i.e., the memory layout is not the same as GPT2.
            # This makes the concatenation with past_key_value more efficient.
            query, key_value = (
                self.c_attn(hidden_states)
                .view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim)
                .transpose(1, 2)
                .split((self.head_dim, 2 * self.head_dim), dim=3)
            )

        if layer_past is not None:
            key_value = torch.cat((layer_past, key_value), dim=-2)
        present = key_value if use_cache else None

        key, value = key_value.split((self.head_dim, self.head_dim), dim=-1)

        attn_output, attn_weights = self._attn(query, key.transpose(-1, -2), value, attention_mask, head_mask)

        if not self.multi_query:
            attn_output = attn_output.transpose(1, 2).reshape(hidden_states.shape)
        attn_output = self.c_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            if self.multi_query:
                # Transpose to return weights in the usual format (batch_size, num_heads, query_length, key_length)
                attn_weights = attn_weights.transpose(1, 2)
            outputs += (attn_weights,)

        return outputs  # a, present, (attentions)
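A back-of-the-envelope sketch (illustrative sizes only, not taken from a real checkpoint) of why the Multi-Query path above is cheaper: the fused `c_attn` projection and the per-layer KV cache shrink because only one key/value head is kept.

```python
# Hypothetical sizes used purely for illustration.
embed_dim, num_heads = 2048, 16
head_dim = embed_dim // num_heads            # 128
kv_heads_mqa, kv_heads_mha = 1, num_heads

# c_attn output width: query width plus 2 * kv_dim (key and value).
mqa_out = embed_dim + 2 * (kv_heads_mqa * head_dim)   # 2048 + 256  = 2304
mha_out = embed_dim + 2 * (kv_heads_mha * head_dim)   # 2048 + 4096 = 6144
print(mqa_out, mha_out)

# Per-token KV-cache entries per layer: MQA stores one key/value head instead of num_heads.
print(2 * kv_heads_mqa * head_dim, 2 * kv_heads_mha * head_dim)  # 256 vs 4096 values
```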
class GPTBigCodeFlashAttention2(GPTBigCodeAttention):
    """
    GPTBigCode flash attention module. This module inherits from `GPTBigCodeAttention` as the weights of the module
    stays untouched. The only required change would be on the forward pass where it needs to correctly call the public
    API of flash attention and deal with padding tokens in case the input contains any of them.
    """

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.Tensor,
        layer_past: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Union[
        Tuple[torch.Tensor, Optional[torch.Tensor]],
        Tuple[torch.Tensor, Optional[torch.Tensor], Tuple[torch.Tensor, ...]],
    ]:
        if encoder_hidden_states is not None:
            if not hasattr(self, "q_attn") or not self.is_cross_attention:
                raise ValueError(
                    "If class is used as cross attention, the weights `q_attn` have to be defined. "
                    "Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`."
                )

            query = self.q_attn(hidden_states)
            key_value = self.c_attn(encoder_hidden_states)
            attention_mask = encoder_attention_mask
        elif self.multi_query:
            query, key_value = self.c_attn(hidden_states).split((self.embed_dim, 2 * self.kv_dim), dim=2)
        else:
            # Note: We split as (self.num_heads, 3, self.head_dim) instead of (3, self.num_heads, self.head_dim),
            # i.e., the memory layout is not the same as GPT2.
            # This makes the concatenation with past_key_value more efficient.
            query, key_value = (
                self.c_attn(hidden_states)
                .view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim)
                .transpose(1, 2)
                .split((self.head_dim, 2 * self.head_dim), dim=3)
            )

        if layer_past is not None:
            key_value = torch.cat((layer_past, key_value), dim=-2)
        present = key_value if use_cache else None

        key, value = key_value.split((self.head_dim, self.head_dim), dim=-1)

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        if self.multi_query:
            batch_size, query_length, _ = query.shape
            query = query.reshape(batch_size, query_length, self.num_heads, self.head_dim)
            key = key.unsqueeze(2)
            value = value.unsqueeze(2)
        else:
            query_length = query.shape[2]
            batch_size, _, tgt, _ = key.shape
            query = query.transpose(1, 2).reshape(batch_size, query_length, self.num_heads, self.head_dim)
            key = key.transpose(1, 2).reshape(batch_size, tgt, self.num_heads, self.head_dim)
            value = value.transpose(1, 2).reshape(batch_size, tgt, self.num_heads, self.head_dim)

        attn_dropout = self.attn_pdrop if self.training else 0.0

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in float16 just to be sure everything works as expected.
        input_dtype = query.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.c_attn.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )
            query = query.to(target_dtype)
            key = key.to(target_dtype)
            value = value.to(target_dtype)

        attn_output = self._flash_attention_forward(
            query, key, value, attention_mask, query_length, dropout=attn_dropout
        )

        attn_weights_reshaped = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim)
        attn_output = self.c_proj(attn_weights_reshaped)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, present)

        if output_attentions:
            if self.multi_query:
                # Transpose to return weights in the usual format (batch_size, num_heads, query_length, key_length)
                attn_weights_reshaped = attn_weights_reshaped.transpose(1, 2)
            else:
                attn_weights_reshaped = None

            outputs += (attn_weights_reshaped,)

        return outputs  # a, present, (attentions)

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
    def _flash_attention_forward(
        self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
        first unpad the input, then computes the attention scores and pad the final attention scores.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`float`):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
        """
        if not self._flash_attn_uses_top_left_mask:
            causal = self.is_causal
        else:
            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
            causal = self.is_causal and query_length != 1

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            attn_output_unpad = flash_attn_varlen_func(
                query_states,
                key_states,
                value_states,
                cu_seqlens_q=cu_seqlens_q,
                cu_seqlens_k=cu_seqlens_k,
                max_seqlen_q=max_seqlen_in_batch_q,
                max_seqlen_k=max_seqlen_in_batch_k,
                dropout_p=dropout,
                softmax_scale=softmax_scale,
                causal=causal,
            )

            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            attn_output = flash_attn_func(
                query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
            )

        return attn_output

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape

        key_layer = index_first_axis(
            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        value_layer = index_first_axis(
            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )
class GPTBigCodeSdpaAttention(GPTBigCodeAttention):
    def _attn(self, query, key, value, attention_mask=None, head_mask=None):
        if head_mask is not None:
            # The super dispatch is done in the forward.
            raise ValueError(
                "PyTorch SDPA does not support head_mask. Please open an issue in Transformers repository."
            )

        scale = None
        if not self.scale_attn_weights:
            scale = 1

        # MQA models: (batch_size, query_length, num_heads * head_dim)
        # MHA models: (batch_size, num_heads, query_length, head_dim)
        query_shape = query.shape
        batch_size = query_shape[0]
        key.shape[-2]

        if self.multi_query:
            query_length = query_shape[1]

            # SDPA requires the dimension [..., sequence_length, head_dim].
            query = query.view(batch_size, query_length, self.num_heads, self.head_dim).transpose(1, 2)

            # Without these unsqueeze, SDPA complains as the query and key/value have a different number of dimensions.
            key = key.unsqueeze(1)
            value = value.unsqueeze(1)

            # Although these expand are not numerically useful, PyTorch can not dispatch to memory-efficient backend
            # and flash attention backend (No available kernel. Aborting execution.) from the shapes
            # query = [batch_size, num_heads, query_length, head_dim]
            # key = [batch_size, 1, past_length, head_dim]
            # value = [batch_size, 1, past_length, head_dim]
            #
            # torch==2.1.2 is bugged with non-contiguous inputs with custom attn_mask (https://github.com/pytorch/pytorch/issues/112577), hence the check.
            if is_torch_greater_or_equal_than_2_2:
                key = key.expand(-1, self.num_heads, -1, -1)
                value = value.expand(-1, self.num_heads, -1, -1)
        else:
            query_length = query_shape[-1]

            # See the comment above.
            if query.device.type == "cuda" and attention_mask is not None:
                query = query.contiguous()
                key = key.contiguous()
                value = value.contiguous()

        sdpa_result = torch.nn.functional.scaled_dot_product_attention(
            query,
            key,
            value,
            attn_mask=attention_mask,
            dropout_p=self.attn_pdrop if self.training else 0.0,
            # The query_length > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case query_length == 1.
            is_causal=self.is_causal and attention_mask is None and query_length > 1,
            scale=scale,
        )

        if self.multi_query:
            # (batch_size, num_heads, seq_len, head_dim) --> (batch_size, seq_len, num_heads, head_dim)
            sdpa_result = sdpa_result.transpose(1, 2)

            # Reshape is kind of expensive here, as it does a memory copy,
            # but I did not manage to make away without it (logits do not match when using view)
            # (batch_size, seq_len, num_heads, head_dim) --> (batch_size, seq_len, num_heads * head_dim)
            sdpa_result = sdpa_result.reshape(query_shape)

        return sdpa_result, None

    def forward(
        self,
        hidden_states: torch.Tensor,
        layer_past: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Union[
        Tuple[torch.Tensor, Optional[torch.Tensor]],
        Tuple[torch.Tensor, Optional[torch.Tensor], Tuple[torch.Tensor, ...]],
    ]:
        if encoder_hidden_states is not None:
            if not hasattr(self, "q_attn") or not self.is_cross_attention:
                raise ValueError(
                    "If class is used as cross attention, the weights `q_attn` have to be defined. "
                    "Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`."
                )

            query = self.q_attn(hidden_states)
            key_value = self.c_attn(encoder_hidden_states)
            attention_mask = encoder_attention_mask
        elif self.multi_query:
            query, key_value = self.c_attn(hidden_states).split((self.embed_dim, 2 * self.kv_dim), dim=2)
        else:
            # Note: We split as (self.num_heads, 3, self.head_dim) instead of (3, self.num_heads, self.head_dim),
            # i.e., the memory layout is not the same as GPT2.
            # This makes the concatenation with past_key_value more efficient.
            query, key_value = (
                self.c_attn(hidden_states)
                .view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim)
                .transpose(1, 2)
                .split((self.head_dim, 2 * self.head_dim), dim=3)
            )

        if layer_past is not None:
            key_value = torch.cat((layer_past, key_value), dim=-2)
        present = key_value if use_cache else None

        key, value = key_value.split((self.head_dim, self.head_dim), dim=-1)

        if not output_attentions and head_mask is None:
            # Difference with the original implementation: there is no need to transpose the key here,
            # as SDPA expects seq_length to be at index -2 for the key as well
            attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
        else:
            # TODO: Improve this warning with e.g. `model.config._attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "GPTBigCodeModel is using GPTBigCodeSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` and `head_mask` not None."
                ' Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            attn_output, attn_weights = super()._attn(query, key.transpose(-1, -2), value, attention_mask, head_mask)

        if not self.multi_query:
            attn_output = attn_output.transpose(1, 2).reshape(hidden_states.shape)
        attn_output = self.c_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            if self.multi_query:
                # Transpose to return weights in the usual format (batch_size, num_heads, query_length, key_length)
                attn_weights = attn_weights.transpose(1, 2)
            outputs += (attn_weights,)

        return outputs
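A minimal standalone sketch of the SDPA trick used above, assuming `torch>=2.0`: a single key/value head is broadcast to all query heads with `expand` before calling `scaled_dot_product_attention`.

```python
import torch
import torch.nn.functional as F

# Illustrative sizes only (not taken from a real checkpoint).
batch, num_heads, q_len, kv_len, head_dim = 2, 4, 5, 7, 8

query = torch.randn(batch, num_heads, q_len, head_dim)
# MQA: one key/value head, broadcast (no copy) to every query head before calling SDPA.
key = torch.randn(batch, 1, kv_len, head_dim).expand(-1, num_heads, -1, -1)
value = torch.randn(batch, 1, kv_len, head_dim).expand(-1, num_heads, -1, -1)

out = F.scaled_dot_product_attention(query, key, value, is_causal=False)
print(out.shape)  # torch.Size([2, 4, 5, 8])
```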
class GPTBigCodeMLP(nn.Module):
    def __init__(self, intermediate_size, config):
        super().__init__()
        embed_dim = config.hidden_size
        self.c_fc = nn.Linear(embed_dim, intermediate_size)
        self.c_proj = nn.Linear(intermediate_size, embed_dim)
        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    # Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP.forward
    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
        hidden_states = self.c_fc(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.c_proj(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states


GPTBIGCODE_ATTENTION_CLASSES = {
    "eager": GPTBigCodeAttention,
    "flash_attention_2": GPTBigCodeFlashAttention2,
    "sdpa": GPTBigCodeSdpaAttention,
}
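A hedged usage sketch (not from the file): with a recent `transformers` release, the `attn_implementation` argument of `from_pretrained` selects one of the classes in the mapping above. The checkpoint name comes from `_CHECKPOINT_FOR_DOC`; downloading it requires network access.

```python
import torch
from transformers import AutoModelForCausalLM

# "eager", "sdpa" or "flash_attention_2"; the last one additionally requires the
# flash-attn package and a supported GPU.
model = AutoModelForCausalLM.from_pretrained(
    "bigcode/gpt_bigcode-santacoder",
    torch_dtype=torch.float16,
    attn_implementation="sdpa",
)
print(type(model.transformer.h[0].attn).__name__)  # GPTBigCodeSdpaAttention
```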
class GPTBigCodeBlock(nn.Module):
    def __init__(self, config, layer_idx=None):
        super().__init__()
        hidden_size = config.hidden_size
        self.inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size

        self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)

        self.attn = GPTBIGCODE_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx)

        self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)

        if config.add_cross_attention:
            if config.multi_query:
                raise NotImplementedError("Cross-attention not implemented for MQA")

            self.crossattention = GPTBIGCODE_ATTENTION_CLASSES[config._attn_implementation](
                config, is_cross_attention=True, layer_idx=layer_idx
            )

            self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)

        self.mlp = GPTBigCodeMLP(self.inner_dim, config)

    def forward(
        self,
        hidden_states: Optional[Tuple[torch.Tensor]],
        layer_past: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Union[
        Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
    ]:
        residual = hidden_states
        hidden_states = self.ln_1(hidden_states)
        attn_outputs = self.attn(
            hidden_states,
            layer_past=layer_past,
            attention_mask=attention_mask,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
        outputs = attn_outputs[1:]
        # residual connection
        hidden_states = attn_output + residual

        if encoder_hidden_states is not None:
            # add one self-attention block for cross-attention
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
                    "cross-attention layers by setting `config.add_cross_attention=True`"
                )
            residual = hidden_states
            hidden_states = self.ln_cross_attn(hidden_states)
            cross_attn_outputs = self.crossattention(
                hidden_states,
                attention_mask=attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                output_attentions=output_attentions,
            )
            attn_output = cross_attn_outputs[0]
            # residual connection
            hidden_states = residual + attn_output
            outputs = outputs + cross_attn_outputs[2:]  # add cross attentions if we output attention weights

        residual = hidden_states
        hidden_states = self.ln_2(hidden_states)
        feed_forward_hidden_states = self.mlp(hidden_states)
        # residual connection
        hidden_states = residual + feed_forward_hidden_states

        if use_cache:
            outputs = (hidden_states,) + outputs
        else:
            outputs = (hidden_states,) + outputs[1:]

        return outputs  # hidden_states, present, (attentions, cross_attentions)
class GPTBigCodePreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = GPTBigCodeConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GPTBigCodeBlock"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (GPTBigCodeMLP, GPTBigCodeAttention)):
            # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
            #   > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
            #   > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
            #   >   -- GPT-2 :: https://openai.com/blog/better-language-models/
            #
            # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
            module.c_proj.weight.data.normal_(
                mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer))
            )
            module.c_proj._is_hf_initialized = True
        elif isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
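A quick numeric illustration (default values assumed, not from the file) of the scaled residual initialization applied in `_init_weights` above: the output projections on the residual path get a smaller standard deviation so activations do not grow with depth.

```python
import math

# With the default initializer_range=0.02 and n_layer=12 from the configuration above:
initializer_range, n_layer = 0.02, 12
print(initializer_range / math.sqrt(2 * n_layer))  # ≈ 0.00408, the std used for c_proj weights
```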
GPT_BIGCODE_START_DOCSTRING = r"""

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`GPTBigCodeConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

GPT_BIGCODE_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`):
            `input_ids_length` = `sequence_length` if `past_key_values` is `None` else
            `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input
            sequence tokens in the vocabulary.

            If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
            `input_ids`.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        past_key_values (`Tuple[torch.Tensor]` of length `config.n_layers`):
            Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
            `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
            their past given to this model should not be passed as `input_ids` as they have already been computed.
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for
            `past_key_values`. In other words, the `attention_mask` always has to have the length:
            `len(past_key_values) + len(input_ids)`

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.

            If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
            `past_key_values`).
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
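A minimal forward-pass sketch matching the inputs documented above, using the `bigcode/gpt_bigcode-santacoder` checkpoint referenced earlier in this file. It assumes `transformers` is installed and the weights can be downloaded; `GPTBigCodeModel` is the class defined just below.

```python
import torch
from transformers import AutoTokenizer, GPTBigCodeModel

tokenizer = AutoTokenizer.from_pretrained("bigcode/gpt_bigcode-santacoder")
model = GPTBigCodeModel.from_pretrained("bigcode/gpt_bigcode-santacoder")

inputs = tokenizer("def hello_world():", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs, use_cache=True)

print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)
print(len(outputs.past_key_values))     # one cached key/value tensor per layer
```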
@add_start_docstrings(
    "The bare GPT_BIGCODE Model transformer outputting raw hidden-states without any specific head on top.",
    GPT_BIGCODE_START_DOCSTRING,
)
class GPTBigCodeModel(GPTBigCodePreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.multi_query = config.multi_query
        self.embed_dim = config.hidden_size

        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)

        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([GPTBigCodeBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)

        max_positions = config.max_position_embeddings
        self.register_buffer(
            "bias", torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)), persistent=False
        )

        self.gradient_checkpointing = False

        self._use_sdpa = config._attn_implementation == "sdpa"
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    @add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPastAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if batch_size <= 0:
            raise ValueError("batch_size has to be defined and > 0")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])

        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0].size(-2)

        if attention_mask is not None and len(attention_mask.shape) == 2 and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_length > 0:
                position_ids = position_ids[:, past_length : input_shape[-1] + past_length :]
        elif position_ids is None:
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0)

        # Self-attention mask.
        query_length = input_shape[-1]
        key_length = past_length + query_length
        self_attention_mask = self.bias[None, key_length - query_length : key_length, :key_length]

        if self._use_flash_attention_2:
            # 2d mask is passed through the layers
            attention_mask = attention_mask.bool() if (attention_mask is not None and 0 in attention_mask) else None
            encoder_attention_mask = (
                encoder_attention_mask.bool()
                if (encoder_attention_mask is not None and 0 in encoder_attention_mask)
                else None
            )
        else:
            # 4d mask is passed through the layers
            if attention_mask is not None:
                self_attention_mask = self_attention_mask * attention_mask.view(batch_size, 1, -1).to(
                    dtype=torch.bool, device=self_attention_mask.device
                )

            # MQA models: (batch_size, query_length, n_heads, key_length)
            # MHA models: (batch_size, n_heads, query_length, key_length)
            self_attention_mask = self_attention_mask.unsqueeze(2 if self.multi_query else 1)

            if self._use_sdpa and head_mask is None and not output_attentions:
                # SDPA with a custom mask is much faster in fp16/fp32 dtype rather than bool. Cast here to floating point instead of at every layer.
                dtype = self.wte.weight.dtype
                min_dtype = torch.finfo(dtype).min
                self_attention_mask = torch.where(
                    self_attention_mask,
                    torch.full([], 0.0, dtype=dtype, device=self_attention_mask.device),
                    torch.full([], min_dtype, dtype=dtype, device=self_attention_mask.device),
                )

                # output_attentions=True can not be supported when using SDPA, and we fall back on
                # the manual implementation that requires a 4D causal mask in all cases.
                if self.multi_query:
                    # gpt_bigcode using MQA has the bad taste to use a causal mask with shape
                    # [batch_size, target_length, 1, source_length], not compatible with SDPA, hence this transpose.
                    self_attention_mask = self_attention_mask.transpose(1, 2)

                if query_length > 1 and attention_mask is not None and attention_mask.device.type == "cuda":
                    # From PyTorch 2.1 onwards, F.scaled_dot_product_attention with the memory-efficient attention backend
                    # produces nans if sequences are completely unattended in the attention mask. Details: https://github.com/pytorch/pytorch/issues/110213
                    self_attention_mask = AttentionMaskConverter._unmask_unattended(
                        self_attention_mask, min_dtype=min_dtype
                    )

            attention_mask = self_attention_mask

            # If a 2D or 3D attention mask is provided for the cross-attention
            # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
            if (
                self.config.add_cross_attention
                and encoder_hidden_states is not None
                and encoder_attention_mask is not None
            ):
                if encoder_attention_mask.dim() == 2:
                    encoder_attention_mask.unsqueeze(1)
                assert encoder_attention_mask.dim() == 3
                encoder_attention_mask = encoder_attention_mask.bool().unsqueeze(2 if self.multi_query else 1)
            else:
                encoder_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)
|
1061 |
+
|
1062 |
+
if inputs_embeds is None:
|
1063 |
+
inputs_embeds = self.wte(input_ids)
|
1064 |
+
position_embeds = self.wpe(position_ids)
|
1065 |
+
hidden_states = inputs_embeds + position_embeds
|
1066 |
+
|
1067 |
+
if token_type_ids is not None:
|
1068 |
+
token_type_embeds = self.wte(token_type_ids)
|
1069 |
+
hidden_states = hidden_states + token_type_embeds
|
1070 |
+
|
1071 |
+
hidden_states = self.drop(hidden_states)
|
1072 |
+
|
1073 |
+
output_shape = input_shape + (hidden_states.size(-1),)
|
1074 |
+
|
1075 |
+
presents = [] if use_cache else None
|
1076 |
+
all_self_attentions = () if output_attentions else None
|
1077 |
+
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
|
1078 |
+
all_hidden_states = () if output_hidden_states else None
|
1079 |
+
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
|
1080 |
+
if output_hidden_states:
|
1081 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
1082 |
+
|
1083 |
+
if self.gradient_checkpointing and self.training:
|
1084 |
+
outputs = self._gradient_checkpointing_func(
|
1085 |
+
block.__call__,
|
1086 |
+
hidden_states,
|
1087 |
+
None,
|
1088 |
+
attention_mask,
|
1089 |
+
head_mask[i],
|
1090 |
+
encoder_hidden_states,
|
1091 |
+
encoder_attention_mask,
|
1092 |
+
use_cache,
|
1093 |
+
output_attentions,
|
1094 |
+
)
|
1095 |
+
else:
|
1096 |
+
outputs = block(
|
1097 |
+
hidden_states,
|
1098 |
+
layer_past=layer_past,
|
1099 |
+
attention_mask=attention_mask,
|
1100 |
+
head_mask=head_mask[i],
|
1101 |
+
encoder_hidden_states=encoder_hidden_states,
|
1102 |
+
encoder_attention_mask=encoder_attention_mask,
|
1103 |
+
use_cache=use_cache,
|
1104 |
+
output_attentions=output_attentions,
|
1105 |
+
)
|
1106 |
+
|
1107 |
+
hidden_states = outputs[0]
|
1108 |
+
if use_cache:
|
1109 |
+
presents.append(outputs[1])
|
1110 |
+
|
1111 |
+
if output_attentions:
|
1112 |
+
all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
|
1113 |
+
if self.config.add_cross_attention:
|
1114 |
+
all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
|
1115 |
+
|
1116 |
+
hidden_states = self.ln_f(hidden_states)
|
1117 |
+
|
1118 |
+
hidden_states = hidden_states.view(output_shape)
|
1119 |
+
# Add last hidden state
|
1120 |
+
if output_hidden_states:
|
1121 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
1122 |
+
|
1123 |
+
if not return_dict:
|
1124 |
+
return tuple(
|
1125 |
+
v
|
1126 |
+
for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
|
1127 |
+
if v is not None
|
1128 |
+
)
|
1129 |
+
|
1130 |
+
return BaseModelOutputWithPastAndCrossAttentions(
|
1131 |
+
last_hidden_state=hidden_states,
|
1132 |
+
past_key_values=presents,
|
1133 |
+
hidden_states=all_hidden_states,
|
1134 |
+
attentions=all_self_attentions,
|
1135 |
+
cross_attentions=all_cross_attentions,
|
1136 |
+
)
|
1137 |
+
|
1138 |
+
|
1139 |
+
@add_start_docstrings(
|
1140 |
+
"""
|
1141 |
+
The GPT_BIGCODE Model transformer with a language modeling head on top (linear layer with weights tied to the input
|
1142 |
+
embeddings).
|
1143 |
+
""",
|
1144 |
+
GPT_BIGCODE_START_DOCSTRING,
|
1145 |
+
)
|
1146 |
+
class GPTBigCodeForCausalLM(GPTBigCodePreTrainedModel):
|
1147 |
+
_tied_weights_keys = ["lm_head.weight"]
|
1148 |
+
|
1149 |
+
def __init__(self, config):
|
1150 |
+
super().__init__(config)
|
1151 |
+
self.transformer = GPTBigCodeModel(config)
|
1152 |
+
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
|
1153 |
+
|
1154 |
+
# Initialize weights and apply final processing
|
1155 |
+
self.post_init()
|
1156 |
+
|
1157 |
+
def get_output_embeddings(self):
|
1158 |
+
return self.lm_head
|
1159 |
+
|
1160 |
+
def set_output_embeddings(self, new_embeddings):
|
1161 |
+
self.lm_head = new_embeddings
|
1162 |
+
|
1163 |
+
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
|
1164 |
+
token_type_ids = kwargs.get("token_type_ids", None)
|
1165 |
+
# Omit tokens covered by past_key_values
|
1166 |
+
if past_key_values:
|
1167 |
+
if self.config.multi_query:
|
1168 |
+
past_length = past_key_values[0].shape[1]
|
1169 |
+
else:
|
1170 |
+
past_length = past_key_values[0].shape[2]
|
1171 |
+
|
1172 |
+
# Some generation methods already pass only the last input ID
|
1173 |
+
if input_ids.shape[1] > past_length:
|
1174 |
+
remove_prefix_length = past_length
|
1175 |
+
else:
|
1176 |
+
# Default to old behavior: keep only final ID
|
1177 |
+
remove_prefix_length = input_ids.shape[1] - 1
|
1178 |
+
|
1179 |
+
input_ids = input_ids[:, remove_prefix_length:]
|
1180 |
+
if token_type_ids is not None:
|
1181 |
+
token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
|
1182 |
+
|
1183 |
+
attention_mask = kwargs.get("attention_mask", None)
|
1184 |
+
position_ids = kwargs.get("position_ids", None)
|
1185 |
+
|
1186 |
+
if attention_mask is not None and position_ids is None:
|
1187 |
+
# create position_ids on the fly for batch generation
|
1188 |
+
position_ids = attention_mask.long().cumsum(-1) - 1
|
1189 |
+
position_ids.masked_fill_(attention_mask == 0, 1)
|
1190 |
+
if past_key_values:
|
1191 |
+
position_ids = position_ids[:, -input_ids.shape[1] :]
|
1192 |
+
else:
|
1193 |
+
position_ids = None
|
1194 |
+
|
1195 |
+
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
|
1196 |
+
if inputs_embeds is not None and past_key_values is None:
|
1197 |
+
model_inputs = {"inputs_embeds": inputs_embeds}
|
1198 |
+
else:
|
1199 |
+
model_inputs = {"input_ids": input_ids}
|
1200 |
+
|
1201 |
+
model_inputs.update(
|
1202 |
+
{
|
1203 |
+
"past_key_values": past_key_values,
|
1204 |
+
"use_cache": kwargs.get("use_cache"),
|
1205 |
+
"position_ids": position_ids,
|
1206 |
+
"attention_mask": attention_mask,
|
1207 |
+
"token_type_ids": token_type_ids,
|
1208 |
+
}
|
1209 |
+
)
|
1210 |
+
return model_inputs
|
1211 |
+
|
1212 |
+
@add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING)
|
1213 |
+
@add_code_sample_docstrings(
|
1214 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
1215 |
+
output_type=CausalLMOutputWithCrossAttentions,
|
1216 |
+
config_class=_CONFIG_FOR_DOC,
|
1217 |
+
)
|
1218 |
+
def forward(
|
1219 |
+
self,
|
1220 |
+
input_ids: Optional[torch.Tensor] = None,
|
1221 |
+
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
|
1222 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1223 |
+
token_type_ids: Optional[torch.Tensor] = None,
|
1224 |
+
position_ids: Optional[torch.Tensor] = None,
|
1225 |
+
head_mask: Optional[torch.Tensor] = None,
|
1226 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
1227 |
+
encoder_hidden_states: Optional[torch.Tensor] = None,
|
1228 |
+
encoder_attention_mask: Optional[torch.Tensor] = None,
|
1229 |
+
labels: Optional[torch.Tensor] = None,
|
1230 |
+
use_cache: Optional[bool] = None,
|
1231 |
+
output_attentions: Optional[bool] = None,
|
1232 |
+
output_hidden_states: Optional[bool] = None,
|
1233 |
+
return_dict: Optional[bool] = None,
|
1234 |
+
) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
|
1235 |
+
r"""
|
1236 |
+
labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
1237 |
+
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
|
1238 |
+
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
|
1239 |
+
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
|
1240 |
+
"""
|
1241 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1242 |
+
|
1243 |
+
transformer_outputs = self.transformer(
|
1244 |
+
input_ids,
|
1245 |
+
past_key_values=past_key_values,
|
1246 |
+
attention_mask=attention_mask,
|
1247 |
+
token_type_ids=token_type_ids,
|
1248 |
+
position_ids=position_ids,
|
1249 |
+
head_mask=head_mask,
|
1250 |
+
inputs_embeds=inputs_embeds,
|
1251 |
+
encoder_hidden_states=encoder_hidden_states,
|
1252 |
+
encoder_attention_mask=encoder_attention_mask,
|
1253 |
+
use_cache=use_cache,
|
1254 |
+
output_attentions=output_attentions,
|
1255 |
+
output_hidden_states=output_hidden_states,
|
1256 |
+
return_dict=return_dict,
|
1257 |
+
)
|
1258 |
+
hidden_states = transformer_outputs[0]
|
1259 |
+
|
1260 |
+
lm_logits = self.lm_head(hidden_states)
|
1261 |
+
|
1262 |
+
loss = None
|
1263 |
+
if labels is not None:
|
1264 |
+
# Shift so that tokens < n predict n
|
1265 |
+
shift_logits = lm_logits[..., :-1, :].contiguous()
|
1266 |
+
shift_labels = labels[..., 1:].contiguous().to(shift_logits.device)
|
1267 |
+
# Flatten the tokens
|
1268 |
+
loss_fct = CrossEntropyLoss()
|
1269 |
+
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
|
1270 |
+
|
1271 |
+
if not return_dict:
|
1272 |
+
output = (lm_logits,) + transformer_outputs[1:]
|
1273 |
+
return ((loss,) + output) if loss is not None else output
|
1274 |
+
|
1275 |
+
return CausalLMOutputWithCrossAttentions(
|
1276 |
+
loss=loss,
|
1277 |
+
logits=lm_logits,
|
1278 |
+
past_key_values=transformer_outputs.past_key_values,
|
1279 |
+
hidden_states=transformer_outputs.hidden_states,
|
1280 |
+
attentions=transformer_outputs.attentions,
|
1281 |
+
cross_attentions=transformer_outputs.cross_attentions,
|
1282 |
+
)
|
1283 |
+
|
1284 |
+
@staticmethod
|
1285 |
+
def _reorder_cache(
|
1286 |
+
past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
|
1287 |
+
) -> Tuple[Tuple[torch.Tensor]]:
|
1288 |
+
"""
|
1289 |
+
This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
|
1290 |
+
[`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
|
1291 |
+
beam_idx at every generation step.
|
1292 |
+
"""
|
1293 |
+
return tuple(layer_past.index_select(0, beam_idx.to(layer_past.device)) for layer_past in past_key_values)
|
1294 |
+
|
1295 |
+
|
1296 |
+
@add_start_docstrings(
|
1297 |
+
"""
|
1298 |
+
The GPTBigCode Model transformer with a sequence classification head on top (linear layer).
|
1299 |
+
|
1300 |
+
[`GPTBigCodeForSequenceClassification`] uses the last token in order to do the classification, as other causal
|
1301 |
+
models (e.g. GPT-1) do.
|
1302 |
+
|
1303 |
+
Since it does classification on the last token, it requires to know the position of the last token. If a
|
1304 |
+
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
|
1305 |
+
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
|
1306 |
+
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
|
1307 |
+
each row of the batch).
|
1308 |
+
""",
|
1309 |
+
GPT_BIGCODE_START_DOCSTRING,
|
1310 |
+
)
|
1311 |
+
class GPTBigCodeForSequenceClassification(GPTBigCodePreTrainedModel):
|
1312 |
+
def __init__(self, config):
|
1313 |
+
super().__init__(config)
|
1314 |
+
self.num_labels = config.num_labels
|
1315 |
+
self.transformer = GPTBigCodeModel(config)
|
1316 |
+
self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
|
1317 |
+
|
1318 |
+
# Initialize weights and apply final processing
|
1319 |
+
self.post_init()
|
1320 |
+
|
1321 |
+
@add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING)
|
1322 |
+
def forward(
|
1323 |
+
self,
|
1324 |
+
input_ids: Optional[torch.Tensor] = None,
|
1325 |
+
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
|
1326 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1327 |
+
token_type_ids: Optional[torch.Tensor] = None,
|
1328 |
+
position_ids: Optional[torch.Tensor] = None,
|
1329 |
+
head_mask: Optional[torch.Tensor] = None,
|
1330 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
1331 |
+
labels: Optional[torch.Tensor] = None,
|
1332 |
+
use_cache: Optional[bool] = None,
|
1333 |
+
output_attentions: Optional[bool] = None,
|
1334 |
+
output_hidden_states: Optional[bool] = None,
|
1335 |
+
return_dict: Optional[bool] = None,
|
1336 |
+
) -> Union[Tuple, SequenceClassifierOutputWithPast]:
|
1337 |
+
r"""
|
1338 |
+
labels (`torch.Tensor` of shape `(batch_size,)`, *optional*):
|
1339 |
+
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
|
1340 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
1341 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
1342 |
+
"""
|
1343 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1344 |
+
|
1345 |
+
transformer_outputs = self.transformer(
|
1346 |
+
input_ids,
|
1347 |
+
past_key_values=past_key_values,
|
1348 |
+
attention_mask=attention_mask,
|
1349 |
+
token_type_ids=token_type_ids,
|
1350 |
+
position_ids=position_ids,
|
1351 |
+
head_mask=head_mask,
|
1352 |
+
inputs_embeds=inputs_embeds,
|
1353 |
+
use_cache=use_cache,
|
1354 |
+
output_attentions=output_attentions,
|
1355 |
+
output_hidden_states=output_hidden_states,
|
1356 |
+
return_dict=return_dict,
|
1357 |
+
)
|
1358 |
+
hidden_states = transformer_outputs[0]
|
1359 |
+
logits = self.score(hidden_states)
|
1360 |
+
|
1361 |
+
if input_ids is not None:
|
1362 |
+
batch_size, sequence_length = input_ids.shape[:2]
|
1363 |
+
else:
|
1364 |
+
batch_size, sequence_length = inputs_embeds.shape[:2]
|
1365 |
+
|
1366 |
+
assert (
|
1367 |
+
self.config.pad_token_id is not None or batch_size == 1
|
1368 |
+
), "Cannot handle batch sizes > 1 if no padding token is defined."
|
1369 |
+
if self.config.pad_token_id is None:
|
1370 |
+
sequence_lengths = -1
|
1371 |
+
else:
|
1372 |
+
if input_ids is not None:
|
1373 |
+
# if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
|
1374 |
+
sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
|
1375 |
+
sequence_lengths = sequence_lengths % input_ids.shape[-1]
|
1376 |
+
sequence_lengths = sequence_lengths.to(logits.device)
|
1377 |
+
else:
|
1378 |
+
sequence_lengths = -1
|
1379 |
+
logger.warning(
|
1380 |
+
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
|
1381 |
+
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
|
1382 |
+
)
|
1383 |
+
|
1384 |
+
pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
|
1385 |
+
|
1386 |
+
loss = None
|
1387 |
+
if labels is not None:
|
1388 |
+
labels = labels.to(logits.device)
|
1389 |
+
|
1390 |
+
if self.config.problem_type is None:
|
1391 |
+
if self.num_labels == 1:
|
1392 |
+
self.config.problem_type = "regression"
|
1393 |
+
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
|
1394 |
+
self.config.problem_type = "single_label_classification"
|
1395 |
+
else:
|
1396 |
+
self.config.problem_type = "multi_label_classification"
|
1397 |
+
|
1398 |
+
if self.config.problem_type == "regression":
|
1399 |
+
loss_fct = MSELoss()
|
1400 |
+
if self.num_labels == 1:
|
1401 |
+
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
|
1402 |
+
else:
|
1403 |
+
loss = loss_fct(pooled_logits, labels)
|
1404 |
+
elif self.config.problem_type == "single_label_classification":
|
1405 |
+
loss_fct = CrossEntropyLoss()
|
1406 |
+
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
|
1407 |
+
elif self.config.problem_type == "multi_label_classification":
|
1408 |
+
loss_fct = BCEWithLogitsLoss()
|
1409 |
+
loss = loss_fct(pooled_logits, labels)
|
1410 |
+
if not return_dict:
|
1411 |
+
output = (pooled_logits,) + transformer_outputs[1:]
|
1412 |
+
return ((loss,) + output) if loss is not None else output
|
1413 |
+
|
1414 |
+
return SequenceClassifierOutputWithPast(
|
1415 |
+
loss=loss,
|
1416 |
+
logits=pooled_logits,
|
1417 |
+
past_key_values=transformer_outputs.past_key_values,
|
1418 |
+
hidden_states=transformer_outputs.hidden_states,
|
1419 |
+
attentions=transformer_outputs.attentions,
|
1420 |
+
)
|
1421 |
+
|
1422 |
+
|
1423 |
+
@add_start_docstrings(
|
1424 |
+
"""
|
1425 |
+
GPT_BIGCODE Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
|
1426 |
+
for Named-Entity-Recognition (NER) tasks.
|
1427 |
+
""",
|
1428 |
+
GPT_BIGCODE_START_DOCSTRING,
|
1429 |
+
)
|
1430 |
+
class GPTBigCodeForTokenClassification(GPTBigCodePreTrainedModel):
|
1431 |
+
def __init__(self, config):
|
1432 |
+
super().__init__(config)
|
1433 |
+
self.num_labels = config.num_labels
|
1434 |
+
|
1435 |
+
self.transformer = GPTBigCodeModel(config)
|
1436 |
+
if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
|
1437 |
+
classifier_dropout = config.classifier_dropout
|
1438 |
+
elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
|
1439 |
+
classifier_dropout = config.hidden_dropout
|
1440 |
+
else:
|
1441 |
+
classifier_dropout = 0.1
|
1442 |
+
self.dropout = nn.Dropout(classifier_dropout)
|
1443 |
+
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
|
1444 |
+
|
1445 |
+
# Initialize weights and apply final processing
|
1446 |
+
self.post_init()
|
1447 |
+
|
1448 |
+
@add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING)
|
1449 |
+
def forward(
|
1450 |
+
self,
|
1451 |
+
input_ids: Optional[torch.Tensor] = None,
|
1452 |
+
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
|
1453 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1454 |
+
token_type_ids: Optional[torch.Tensor] = None,
|
1455 |
+
position_ids: Optional[torch.Tensor] = None,
|
1456 |
+
head_mask: Optional[torch.Tensor] = None,
|
1457 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
1458 |
+
labels: Optional[torch.Tensor] = None,
|
1459 |
+
use_cache: Optional[bool] = None,
|
1460 |
+
output_attentions: Optional[bool] = None,
|
1461 |
+
output_hidden_states: Optional[bool] = None,
|
1462 |
+
return_dict: Optional[bool] = None,
|
1463 |
+
) -> Union[Tuple, TokenClassifierOutput]:
|
1464 |
+
r"""
|
1465 |
+
labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
1466 |
+
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
|
1467 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
1468 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
1469 |
+
"""
|
1470 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1471 |
+
|
1472 |
+
transformer_outputs = self.transformer(
|
1473 |
+
input_ids,
|
1474 |
+
past_key_values=past_key_values,
|
1475 |
+
attention_mask=attention_mask,
|
1476 |
+
token_type_ids=token_type_ids,
|
1477 |
+
position_ids=position_ids,
|
1478 |
+
head_mask=head_mask,
|
1479 |
+
inputs_embeds=inputs_embeds,
|
1480 |
+
use_cache=use_cache,
|
1481 |
+
output_attentions=output_attentions,
|
1482 |
+
output_hidden_states=output_hidden_states,
|
1483 |
+
return_dict=return_dict,
|
1484 |
+
)
|
1485 |
+
|
1486 |
+
hidden_states = transformer_outputs[0]
|
1487 |
+
hidden_states = self.dropout(hidden_states)
|
1488 |
+
logits = self.classifier(hidden_states)
|
1489 |
+
|
1490 |
+
loss = None
|
1491 |
+
if labels is not None:
|
1492 |
+
loss_fct = CrossEntropyLoss()
|
1493 |
+
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1).to(logits.device))
|
1494 |
+
|
1495 |
+
if not return_dict:
|
1496 |
+
output = (logits,) + transformer_outputs[2:]
|
1497 |
+
return ((loss,) + output) if loss is not None else output
|
1498 |
+
|
1499 |
+
return TokenClassifierOutput(
|
1500 |
+
loss=loss,
|
1501 |
+
logits=logits,
|
1502 |
+
hidden_states=transformer_outputs.hidden_states,
|
1503 |
+
attentions=transformer_outputs.attentions,
|
1504 |
+
)
|
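The head classes above are normally exercised through the standard `transformers` API rather than called directly. A minimal usage sketch for the causal-LM head, illustrative only and not part of the diffed file; it assumes the `bigcode/gpt_bigcode-santacoder` checkpoint is reachable:

```python
>>> from transformers import AutoTokenizer, GPTBigCodeForCausalLM

>>> tokenizer = AutoTokenizer.from_pretrained("bigcode/gpt_bigcode-santacoder")
>>> model = GPTBigCodeForCausalLM.from_pretrained("bigcode/gpt_bigcode-santacoder")

>>> inputs = tokenizer("def fibonacci(n):", return_tensors="pt")
>>> # generate() calls forward() repeatedly; the per-layer key/value cache is carried in past_key_values
>>> output_ids = model.generate(**inputs, max_new_tokens=20)
>>> print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```

During `generate()`, `prepare_inputs_for_generation` trims `input_ids` to the tokens not yet covered by `past_key_values`, which is why every step after the first feeds only the newly sampled token back into the model.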
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox_japanese/__init__.py
ADDED
@@ -0,0 +1,62 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )


else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
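The `_LazyModule` indirection above means the heavy `modeling_gpt_neox_japanese` module is only imported when one of its names is first requested. A small sketch of the import paths this file enables, illustrative and not part of the diff:

```python
>>> from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseConfig, GPTNeoXJapaneseTokenizer

>>> # Modeling symbols resolve through the same lazy structure, but only when torch is installed
>>> # (see the OptionalDependencyNotAvailable guard above).
>>> from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseForCausalLM

>>> print(GPTNeoXJapaneseConfig().model_type)
gpt_neox_japanese
```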
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox_japanese/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.16 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox_japanese/__pycache__/configuration_gpt_neox_japanese.cpython-310.pyc
ADDED
Binary file (5.02 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox_japanese/__pycache__/modeling_gpt_neox_japanese.cpython-310.pyc
ADDED
Binary file (23.3 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox_japanese/__pycache__/tokenization_gpt_neox_japanese.cpython-310.pyc
ADDED
Binary file (15.3 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py
ADDED
@@ -0,0 +1,120 @@
# coding=utf-8
# Copyright 2022 ABEJA, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" GPTNeoX Japanese model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class GPTNeoXJapaneseConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GPTNeoXModelJapanese`]. It is used to instantiate
    a GPTNeoX model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the GPTNeoXJapanese
    [abeja/gpt-neox-japanese-2.7b](https://huggingface.co/abeja/gpt-neox-japanese-2.7b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information. Default configs is set as 2.7B model

    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the GPTNeoXJapanese model. Defines the number of different tokens that can be
            represented by the `inputs_ids` passed when calling [`GPTNeoXJapanese`].
        hidden_size (`int`, *optional*, defaults to 2560):
            Dimension of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_multiple_size (`int`, *optional*, defaults to 4):
            Dimension of the "intermediate" layer in the Transformer encoder is calculated by hidden_size *
            intermediate_multiple_size.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler.
        rotary_pct (`float`, *optional*, defaults to 1.00):
            percentage of hidden dimensions to allocate to rotary embeddings
        rotary_emb_base (`int`, *optional*, defaults to 10000)
            base for computing rotary embeddings frequency
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention.
        hidden_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the hidden layer.
    Example:

    ```python
    >>> from transformers import GPTNeoXJapaneseConfig, GPTNeoXJapaneseModel

    >>> # Initializing a GPTNeoXJapanese gpt-neox-japanese-2.7b style configuration
    >>> configuration = GPTNeoXJapaneseConfig()

    >>> # Initializing a model (with random weights) from the gpt-neox-japanese-2.7b style configuration
    >>> model = GPTNeoXJapaneseModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
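One derived quantity worth noting: this configuration stores `intermediate_multiple_size` rather than an absolute MLP width, and the modeling file that follows computes `intermediate_size = hidden_size * intermediate_multiple_size` in `GPTNeoXJapaneseMLP`. A quick sketch with the 2.7B-style defaults shown above, illustrative and not part of the diff:

```python
>>> from transformers import GPTNeoXJapaneseConfig

>>> config = GPTNeoXJapaneseConfig()  # defaults: hidden_size=2560, intermediate_multiple_size=4
>>> intermediate_size = int(config.hidden_size * config.intermediate_multiple_size)
>>> print(intermediate_size)
10240
```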
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py
ADDED
@@ -0,0 +1,729 @@
# coding=utf-8
# Copyright 2022 ABEJA, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch GPTNeoX model."""

from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN
from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_gpt_neox_japanese import GPTNeoXJapaneseConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "abeja/gpt-neox-japanese-2.7b"
_CONFIG_FOR_DOC = "GPTNeoXJapaneseConfig"


from ..deprecated._archive_maps import GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


class GPTNeoXJapanesePreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = GPTNeoXJapaneseConfig
    base_model_prefix = "gpt_neox_japanese"
    _no_split_modules = ["GPTNeoXJapaneseLayer"]
    _skip_keys_device_placement = "past_key_values"

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


class GPTNeoXJapaneseAttention(nn.Module):
    def __init__(self, config, use_bias=False):
        super().__init__()
        self.num_attention_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size
        self.head_size = self.hidden_size // self.num_attention_heads

        self.rotary_ndims = int(self.head_size * config.rotary_pct)
        self.rotary_emb = RotaryEmbedding(
            self.rotary_ndims, config.max_position_embeddings, base=config.rotary_emb_base
        )
        self.max_positions = config.max_position_embeddings
        self.attention_dropout = nn.Dropout(config.attention_dropout)
        self.norm_factor = torch.sqrt(torch.tensor(self.head_size, dtype=torch.float32)).to(torch.get_default_dtype())

        self.query_key_value = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=False)
        self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
        # Activate bias if the last layer
        self.use_bias = use_bias
        self.dense_bias = nn.Parameter(torch.zeros(config.hidden_size)) if use_bias else None

    def forward(
        self,
        hidden_states,
        attention_mask,
        head_mask=None,
        layer_past=None,
        use_cache=False,
        output_attentions=False,
    ):
        has_layer_past = layer_past is not None and layer_past[0].numel() > 0

        # Compute QKV
        # Attention heads [batch, seq_len, hidden_size]
        #   --> [batch, seq_len, (np * 3 * head_size)]
        qkv = self.query_key_value(hidden_states)

        # [batch, seq_len, (num_heads * 3 * head_size)]
        #   --> [batch, seq_len, num_heads, 3 * head_size]
        new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size)
        qkv = qkv.view(*new_qkv_shape)

        # [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 [batch, num_attention_heads, seq_len, head_size]
        query = qkv[..., : self.head_size].permute(0, 2, 1, 3)
        key = qkv[..., self.head_size : 2 * self.head_size].permute(0, 2, 1, 3)
        value = qkv[..., 2 * self.head_size :].permute(0, 2, 1, 3)

        # Compute rotary embeddings on rotary_ndims
        query_rot = query[..., : self.rotary_ndims]
        query_pass = query[..., self.rotary_ndims :]
        key_rot = key[..., : self.rotary_ndims]
        key_pass = key[..., self.rotary_ndims :]

        # Compute token offset for rotary embeddings (when decoding)
        seq_len = key.shape[-2]
        offset = 0
        if has_layer_past:
            offset = layer_past[0].shape[-2]
            seq_len += offset
        cos, sin = self.rotary_emb(value, seq_len=seq_len)
        query, key = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, offset=offset)
        query = torch.cat((query, query_pass), dim=-1)
        key = torch.cat((key, key_pass), dim=-1)

        # Cache QKV values
        if has_layer_past:
            past_key = layer_past[0]
            past_value = layer_past[1]
            key = torch.cat((past_key, key), dim=-2)
            value = torch.cat((past_value, value), dim=-2)
        present = (key, value) if use_cache else None

        # Compute attention
        attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)

        # Reshape outputs
        attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_size)
        attn_output = self.dense(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs, self.dense_bias

    @classmethod
    def _split_heads(cls, tensor, num_attention_heads, attn_head_size):
        """
        Splits hidden dim into attn_head_size and num_attention_heads
        """
        # tensor: [bs, seq_len, hidden_size]
        new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)
        # -> [bs, seq_len, num_attention_heads, attn_head_size]
        tensor = tensor.view(new_shape)
        # -> [bs, num_attention_heads, seq_len, attn_head_size]
        tensor = tensor.permute(0, 2, 1, 3)
        return tensor

    @classmethod
    def _merge_heads(cls, tensor, num_attention_heads, attn_head_size):
        """
        Merges attn_head_size dim and num_attn_heads dim into hidden dim
        """
        # tensor [bs, num_attention_heads, seq_len, attn_head_size]
        tensor = tensor.permute(0, 2, 1, 3).contiguous()
        # -> [bs, seq_len, num_attention_heads, attn_head_size]
        tensor = tensor.view(tensor.size(0), tensor.size(1), num_attention_heads * attn_head_size)
        # -> [bs, seq_len, hidden_size]
        return tensor

    def _create_causal_mask(self, key_length, query_length):
        causal_mask = torch.tril(
            torch.ones((self.max_positions, self.max_positions), dtype=torch.bool).view(
                1, 1, self.max_positions, self.max_positions
            )
        )
        return causal_mask[:, :, key_length - query_length : key_length, :key_length]

    def _attn(self, query, key, value, attention_mask=None, head_mask=None):
        # q, k, v: [bs, num_attention_heads, seq_len, attn_head_size]
        # compute causal mask from causal mask buffer
        batch_size, num_attention_heads, query_length, attn_head_size = query.size()
        key_length = key.size(-2)

        causal_mask = self._create_causal_mask(key_length, query_length)

        query = query.view(batch_size * num_attention_heads, query_length, attn_head_size)
        key = key.view(batch_size * num_attention_heads, key_length, attn_head_size)
        attn_scores = torch.zeros(
            batch_size * num_attention_heads,
            query_length,
            key_length,
            dtype=query.dtype,
            device=key.device,
        )
        attn_scores = torch.baddbmm(
            attn_scores,
            query,
            key.transpose(1, 2),
            beta=1.0,
            alpha=(torch.tensor(1.0, dtype=self.norm_factor.dtype, device=self.norm_factor.device) / self.norm_factor),
        )
        attn_scores = attn_scores.view(batch_size, num_attention_heads, query_length, key_length)

        mask_value = torch.finfo(attn_scores.dtype).min
        # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
        # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
        mask_value = torch.tensor(mask_value, dtype=attn_scores.dtype).to(attn_scores.device)
        causal_mask = causal_mask.to(attn_scores.device)
        attn_scores = torch.where(causal_mask, attn_scores, mask_value)

        if attention_mask is not None:
            # Apply the attention mask
            attn_scores = attn_scores + attention_mask

        attn_weights = nn.functional.softmax(attn_scores, dim=-1)
        attn_weights = self.attention_dropout(attn_weights)
        attn_weights = attn_weights.to(value.dtype)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)
        return attn_output, attn_weights


# Copied from transformers.models.gpt_neox.modeling_gpt_neox.GPTNeoXRotaryEmbedding with GPTNeoXRotaryEmbedding->RotaryEmbedding
class RotaryEmbedding(nn.Module):
    # Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding.__init__
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        # Build here to make `torch.jit.trace` work.
        self._set_cos_sin_cache(
            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)

        freqs = torch.outer(t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos(), persistent=False)
        self.register_buffer("sin_cached", emb.sin(), persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)

        return (
            self.cos_cached[:seq_len],
            self.sin_cached[:seq_len],
        )


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, offset: int = 0):
    cos = cos[..., offset : q.shape[-2] + offset, :]
    sin = sin[..., offset : q.shape[-2] + offset, :]
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def bias_dropout_add(x: Tensor, bias: Tensor, residual: Optional[Tensor], prob: float, training: bool) -> Tensor:
    """add bias to x, apply dropout and residual connection

    Args:
        x (Tensor): main path of output
        bias (Tensor): None or attn_bias of the last attention layer
        residual (Optional[Tensor]): residual value
        prob (float): dropout probability
        training (bool): whether in training mode or not

    Returns:
        Tensor: dropout(x + bias) + residual
    """
    if bias is not None:
        x = x + bias
    out = torch.nn.functional.dropout(x, p=prob, training=training)
    if residual is not None:
        out = residual + out
    return out


class GPTNeoXJapaneseMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        intermediate_size = int(config.hidden_size * config.intermediate_multiple_size)
        self.dense_h_to_4h = nn.Linear(config.hidden_size, intermediate_size, bias=False)
        # Project back to h.
        self.dense_4h_to_h = nn.Linear(intermediate_size, config.hidden_size, bias=False)
        self.act = ACT2FN[config.hidden_act]

    def forward(self, hidden_states):
        intermediate = self.dense_h_to_4h(hidden_states)
        intermediate = self.act(intermediate)
        output = self.dense_4h_to_h(intermediate)
        return output


class GPTNeoXJapaneseLayer(nn.Module):
    def __init__(self, config, layer_number):
        super().__init__()
        self.layer_number = layer_number
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # activate bias only last layer
        self.attention = GPTNeoXJapaneseAttention(config=config, use_bias=layer_number == config.num_hidden_layers - 1)
        self.mlp = GPTNeoXJapaneseMLP(config)
        self.hidden_dropout = config.hidden_dropout

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        use_cache=False,
        layer_past=None,
        output_attentions=False,
    ):
        residual = hidden_states
        ln_out = self.input_layernorm(hidden_states)
        attention_layer_outputs, attn_bias = self.attention(
            ln_out,
            attention_mask=attention_mask,
            layer_past=layer_past,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attention_layer_outputs[0]  # output_attn: a, present, (attentions)
        outputs = attention_layer_outputs[1:]

        # attn_output = (atten_output + bias) + residual
        attn_output = bias_dropout_add(
            attn_output,
            bias=attn_bias.expand_as(residual) if attn_bias is not None else attn_bias,
            residual=residual,
            prob=self.hidden_dropout,
            training=self.training,
        )
        mlp_output = self.mlp(self.post_attention_layernorm(attn_output))

        # attn_output = (mlp_output + mlp_bias) + atten_output
        attn_output = bias_dropout_add(
            mlp_output, bias=None, residual=attn_output, prob=self.hidden_dropout, training=self.training
        )

        if use_cache:
            outputs = (attn_output,) + outputs
        else:
            outputs = (attn_output,) + outputs[1:]

        return outputs  # hidden_states, present, (attentions)


GPT_NEOX_JAPANESE_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`~GPTNeoXJapaneseConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

GPT_NEOX_JAPANESE_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`].

        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare GPTNeoXJapanese Model transformer outputting raw hidden-states without any specific head on top.",
    GPT_NEOX_JAPANESE_START_DOCSTRING,
)
class GPTNeoXJapaneseModel(GPTNeoXJapanesePreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embed_in = nn.Embedding(config.vocab_size, config.hidden_size)
        self.layers = nn.ModuleList(
            [GPTNeoXJapaneseLayer(config=config, layer_number=i) for i in range(config.num_hidden_layers)]
        )
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_in

    def set_input_embeddings(self, value):
        self.embed_in = value

    @add_start_docstrings_to_model_forward(GPT_NEOX_JAPANESE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        r"""
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, GPTNeoXJapaneseModel
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
        >>> model = GPTNeoXJapaneseModel.from_pretrained("abeja/gpt-neox-japanese-2.7b")

        >>> inputs = tokenizer("日本語のGPT-neoxがHugging Faceで使えます😀", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> last_hidden_states = outputs.last_hidden_state
        ```
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        use_cache = use_cache if use_cache is not None else self.config.use_cache
|
507 |
+
|
508 |
+
if input_ids is not None and inputs_embeds is not None:
|
509 |
+
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
510 |
+
elif input_ids is not None:
|
511 |
+
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
|
512 |
+
input_shape = input_ids.size()
|
513 |
+
elif inputs_embeds is not None:
|
514 |
+
input_shape = inputs_embeds.size()[:-1]
|
515 |
+
else:
|
516 |
+
raise ValueError("You have to specify either input_ids or inputs_embeds")
|
517 |
+
|
518 |
+
batch_size, seq_length = input_shape
|
519 |
+
|
520 |
+
if past_key_values is None:
|
521 |
+
past_key_values = tuple([None] * self.config.num_hidden_layers)
|
522 |
+
|
523 |
+
# Attention mask.
|
524 |
+
if attention_mask is not None:
|
525 |
+
if not batch_size > 0:
|
526 |
+
raise ValueError("batch_size has to be defined and > 0")
|
527 |
+
attention_mask = attention_mask.view(batch_size, -1)
|
528 |
+
# We create a 3D attention mask from a 2D tensor mask.
|
529 |
+
# Sizes are [batch_size, 1, 1, to_seq_length]
|
530 |
+
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
|
531 |
+
# this attention mask is more simple than the triangular masking of causal attention
|
532 |
+
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
|
533 |
+
attention_mask = attention_mask[:, None, None, :]
|
534 |
+
|
535 |
+
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
|
536 |
+
# masked positions, this operation will create a tensor which is 0.0 for
|
537 |
+
# positions we want to attend and -10000.0 for masked positions.
|
538 |
+
# Since we are adding it to the raw scores before the softmax, this is
|
539 |
+
# effectively the same as removing these entirely.
|
540 |
+
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
|
541 |
+
attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
|
542 |
+
|
543 |
+
# Prepare head mask if needed
|
544 |
+
# 1.0 in head_mask indicate we keep the head
|
545 |
+
# attention_probs has shape bsz x n_heads x N x N
|
546 |
+
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
|
547 |
+
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
|
548 |
+
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
549 |
+
|
550 |
+
if inputs_embeds is None:
|
551 |
+
inputs_embeds = self.embed_in(input_ids)
|
552 |
+
|
553 |
+
hidden_states = inputs_embeds
|
554 |
+
|
555 |
+
presents = () if use_cache else None
|
556 |
+
all_attentions = () if output_attentions else None
|
557 |
+
all_hidden_states = () if output_hidden_states else None
|
558 |
+
for i, (layer, layer_past) in enumerate(zip(self.layers, past_key_values)):
|
559 |
+
if output_hidden_states:
|
560 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
561 |
+
outputs = layer(
|
562 |
+
hidden_states,
|
563 |
+
attention_mask=attention_mask,
|
564 |
+
head_mask=head_mask[i],
|
565 |
+
layer_past=layer_past,
|
566 |
+
use_cache=use_cache,
|
567 |
+
output_attentions=output_attentions,
|
568 |
+
)
|
569 |
+
hidden_states = outputs[0]
|
570 |
+
if use_cache is True:
|
571 |
+
presents = presents + (outputs[1],)
|
572 |
+
if output_attentions:
|
573 |
+
all_attentions = all_attentions + (outputs[2 if use_cache else 1],)
|
574 |
+
|
575 |
+
hidden_states = self.final_layer_norm(hidden_states)
|
576 |
+
# Add last hidden state
|
577 |
+
if output_hidden_states:
|
578 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
579 |
+
|
580 |
+
if not return_dict:
|
581 |
+
return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)
|
582 |
+
|
583 |
+
return BaseModelOutputWithPast(
|
584 |
+
last_hidden_state=hidden_states,
|
585 |
+
past_key_values=presents,
|
586 |
+
hidden_states=all_hidden_states,
|
587 |
+
attentions=all_attentions,
|
588 |
+
)
|
589 |
+
|
590 |
+
|
591 |
+
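A minimal sketch of the attention-mask arithmetic in `forward` above: a 2D padding mask is broadcast to `[batch, 1, 1, to_seq_length]` and turned into an additive bias that drives masked logits to the dtype minimum before softmax. The tensor values here are illustrative only and are not part of the file above.

import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])  # batch_size=1, to_seq_length=4; 0 marks padding
extended = attention_mask[:, None, None, :].to(torch.float32)  # [batch, 1, 1, to_seq_length]
bias = (1.0 - extended) * torch.finfo(torch.float32).min  # 0.0 where attended, dtype-min where masked

print(bias.shape)     # torch.Size([1, 1, 1, 4])
print(bias[0, 0, 0])  # tensor([ 0.0000e+00,  0.0000e+00,  0.0000e+00, -3.4028e+38])
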
@add_start_docstrings(
    """GPTNeoXJapanese Model with a `language modeling` head on top for Classifier Model fine-tuning.""",
    GPT_NEOX_JAPANESE_START_DOCSTRING,
)
class GPTNeoXJapaneseForCausalLM(GPTNeoXJapanesePreTrainedModel):
    _tied_weights_keys = ["embed_out.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.gpt_neox_japanese = GPTNeoXJapaneseModel(config)
        self.embed_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.embed_out

    def set_output_embeddings(self, new_embeddings):
        self.embed_out = new_embeddings

    @add_start_docstrings_to_model_forward(GPT_NEOX_JAPANESE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are
            only required when the model is used as a decoder in a Sequence to Sequence model.

            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
            ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseConfig
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
        >>> config = GPTNeoXJapaneseConfig.from_pretrained("abeja/gpt-neox-japanese-2.7b")
        >>> config.is_decoder = True
        >>> model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b", config=config)

        >>> inputs = tokenizer("日本語のGPT-neoxがHugging Faceで使えます😀", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> prediction_logits = outputs.logits
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.gpt_neox_japanese(
            input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        lm_logits = self.embed_out(hidden_states)

        lm_loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(lm_logits.device)

            # we are doing next-token prediction; shift prediction scores and input ids by one
            shift_logits = lm_logits[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1))

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithPast(
            loss=lm_loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
        input_shape = input_ids.shape

        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        # cut decoder_input_ids if past is used
        if past_key_values and past_key_values[0] is not None:
            input_ids = input_ids[:, -1:]

        return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}

    def _reorder_cache(self, past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
                + layer_past[2:],
            )
        return reordered_past
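A minimal greedy-decoding sketch showing how the cache above is meant to be used, mirroring what `prepare_inputs_for_generation` does inside `generate`: after the first step only the newest token is fed back in. It assumes the `abeja/gpt-neox-japanese-2.7b` weights can be downloaded; `model.generate(...)` performs the equivalent loop internally.

import torch
from transformers import AutoTokenizer, GPTNeoXJapaneseForCausalLM

tokenizer = AutoTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b").eval()

input_ids = tokenizer("日本語の", return_tensors="pt").input_ids
past_key_values = None
for _ in range(5):
    # With a cache, only the last token needs to go through the model again.
    step_ids = input_ids if past_key_values is None else input_ids[:, -1:]
    with torch.no_grad():
        out = model(step_ids, past_key_values=past_key_values, use_cache=True)
    next_token = out.logits[:, -1, :].argmax(dim=-1, keepdim=True)
    input_ids = torch.cat([input_ids, next_token], dim=-1)
    past_key_values = out.past_key_values

print(tokenizer.decode(input_ids[0]))
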
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py
ADDED
@@ -0,0 +1,368 @@
# coding=utf-8
# Copyright 2022 ABEJA, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for GPTNeoXJapanese."""
import collections
import json
import os
import re
from typing import Optional, Tuple

import numpy as np

from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}


def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and emoji file into a dictionary."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji

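A small worked example of the vocabulary format `load_vocab_and_emoji` parses (the two rows below are invented): a line may hold several comma-separated surface variants that all map to one id, which is how heterographs collapse to the same token.

# Hypothetical two-line vocab.txt:
#     ある
#     慶応,慶應
# load_vocab_and_emoji then produces (following the code above):
#     vocab         == {"ある": 0, "慶応": 1, "慶應": 1}
#     raw_vocab     == {"ある": 0, "慶応,慶應": 1}
#     ids_to_tokens == {0: ["ある"], 1: ["慶応", "慶應"]}
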
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """
    This tokenizer inherits from [`PreTrainedTokenizer`] and is based on the special Japanese sub-word encoding used
    in this repository (https://github.com/tanreinama/Japanese-BPEEncoder_V2). Check the repository for details.
    Japanese has a relatively large vocabulary and there is no separation between words. Furthermore, the language is a
    combination of hiragana, katakana, and kanji, and variants such as "1" and "①" are often used. To cope with these,
    this tokenizer has the following features:
    - Subword-by-subword segmentation, which is intermediate between byte strings and morphological analysis.
    - BPEs are created for each Kanji, Hiragana, and Katakana character, and there are no BPEs that cross character
      types, such as Kanji + Hiragana or Hiragana + Katakana.
    - All-byte encoding that does not require <unk>.
    - Independent of UTF codes such as 2-byte and 3-byte characters.
    - Conversion of heterographs to the same token_id.
    - Emoji and emoticons are grouped into 12 types as special tags.

    Example:

    ```python
    >>> from transformers import GPTNeoXJapaneseTokenizer

    >>> tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
    >>> # You can confirm both 慶応 and 慶應 are encoded to 17749
    >>> tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"]
    [30014, 26883, 26638, 27228, 25, 26650, 31732, 31679, 27809, 26638, 17749, 31592, 17749, 31593, 321, 1281]

    >>> # Both 慶応 and 慶應 are decoded to 慶応
    >>> tokenizer.decode(tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"])
    '吾輩は猫である🐯。実は慶応(慶応)大学出身'
    ```

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        emoji_file (`str`):
            File containing the emoji.
        unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The token used for padding.
        bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`):
            The beginning of sequence token.
        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The end of sequence token.
        do_clean_text (`bool`, *optional*, defaults to `False`):
            Whether or not to clean text for URL, EMAIL, TEL, Japanese DATE and Japanese PRICE.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )

    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    @property
    def default_chat_template(self):
        """
        A simple chat template that just adds BOS/EOS tokens around messages while discarding role information.
        """
        logger.warning_once(
            "\nNo chat template is defined for this tokenizer - using the default template "
            f"for the {self.__class__.__name__} class. If the default is not appropriate for "
            "your model, please set `tokenizer.chat_template` to an appropriate template. "
            "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
        )
        return (
            "{% for message in messages %}"
            "{{ bos_token + eos_token + message.content + eos_token }}"
            "{% endfor %}"
            "{% if add_generation_prompt %} {{ bos_token + eos_token }} {% endif %}"
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file


class SubWordJapaneseTokenizer(object):
    """
    https://github.com/tanreinama/Japanese-BPEEncoder_V2 This tokenizer class is under the MIT License according to
    the original repository.

    MIT License

    Copyright (c) 2020 tanreinama

    Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
    documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
    permit persons to whom the Software is furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in all copies or substantial portions of
    the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
    THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
    TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.
    """

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
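A short sketch of the two fallback paths in `SubWordJapaneseTokenizer.tokenize` above; the inputs are invented and assumed absent from `vocab.txt`, so the fallbacks fire.

# 1) Byte fallback: a 4-byte character fails both the 2-byte (check_simbol) and the
#    3-byte (checku2e) tests and is emitted as its raw UTF-8 bytes, e.g.
#    "𝕏" -> ["<|byte240|>", "<|byte157|>", "<|byte149|>", "<|byte143|>"]
#    convert_id_to_token later reassembles such runs with bytearray(...).decode("utf-8").
# 2) Cleaning: with clean=True, content_repatter1 rewrites "https://example.com"
#    to the single tag "<URL>" before any vocabulary lookup happens.
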
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/__init__.py
ADDED
@@ -0,0 +1,110 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
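A brief sketch of the lazy-import behaviour this `__init__.py` sets up: nothing listed in `_import_structure` is executed until an attribute is first accessed, while static type checkers follow the eager `TYPE_CHECKING` branch instead.

from transformers.models import mobilevit

# No modeling or vision code has been imported yet at this point; the first
# attribute access makes _LazyModule import configuration_mobilevit for real:
config_cls = mobilevit.MobileViTConfig
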
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.73 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/configuration_mobilevit.cpython-310.pyc
ADDED
Binary file (6.98 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/convert_mlcvnets_to_pytorch.cpython-310.pyc
ADDED
Binary file (8.74 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/feature_extraction_mobilevit.cpython-310.pyc
ADDED
Binary file (1.05 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/image_processing_mobilevit.cpython-310.pyc
ADDED
Binary file (16.1 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/modeling_mobilevit.cpython-310.pyc
ADDED
Binary file (28.2 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/modeling_tf_mobilevit.cpython-310.pyc
ADDED
Binary file (40.1 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/configuration_mobilevit.py
ADDED
@@ -0,0 +1,172 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" MobileViT model configuration"""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class MobileViTConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MobileViTModel`]. It is used to instantiate a
    MobileViT model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the MobileViT
    [apple/mobilevit-small](https://huggingface.co/apple/mobilevit-small) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        image_size (`int`, *optional*, defaults to 256):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 2):
            The size (resolution) of each patch.
        hidden_sizes (`List[int]`, *optional*, defaults to `[144, 192, 240]`):
            Dimensionality (hidden size) of the Transformer encoders at each stage.
        neck_hidden_sizes (`List[int]`, *optional*, defaults to `[16, 32, 64, 96, 128, 160, 640]`):
            The number of channels for the feature maps of the backbone.
        num_attention_heads (`int`, *optional*, defaults to 4):
            Number of attention heads for each attention layer in the Transformer encoder.
        mlp_ratio (`float`, *optional*, defaults to 2.0):
            The ratio of the number of channels in the output of the MLP to the number of channels in the input.
        expand_ratio (`float`, *optional*, defaults to 4.0):
            Expansion factor for the MobileNetv2 layers.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
        conv_kernel_size (`int`, *optional*, defaults to 3):
            The size of the convolutional kernel in the MobileViT layer.
        output_stride (`int`, *optional*, defaults to 32):
            The ratio of the spatial resolution of the output to the resolution of the input image.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the Transformer encoder.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for attached classifiers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        aspp_out_channels (`int`, *optional*, defaults to 256):
            Number of output channels used in the ASPP layer for semantic segmentation.
        atrous_rates (`List[int]`, *optional*, defaults to `[6, 12, 18]`):
            Dilation (atrous) factors used in the ASPP layer for semantic segmentation.
        aspp_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the ASPP layer for semantic segmentation.
        semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
            The index that is ignored by the loss function of the semantic segmentation model.

    Example:

    ```python
    >>> from transformers import MobileViTConfig, MobileViTModel

    >>> # Initializing a mobilevit-small style configuration
    >>> configuration = MobileViTConfig()

    >>> # Initializing a model from the mobilevit-small style configuration
    >>> model = MobileViTModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "mobilevit"

    def __init__(
        self,
        num_channels=3,
        image_size=256,
        patch_size=2,
        hidden_sizes=[144, 192, 240],
        neck_hidden_sizes=[16, 32, 64, 96, 128, 160, 640],
        num_attention_heads=4,
        mlp_ratio=2.0,
        expand_ratio=4.0,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        qkv_bias=True,
        aspp_out_channels=256,
        atrous_rates=[6, 12, 18],
        aspp_dropout_prob=0.1,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_sizes = hidden_sizes
        self.neck_hidden_sizes = neck_hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.expand_ratio = expand_ratio
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        # decode head attributes for semantic segmentation
        self.aspp_out_channels = aspp_out_channels
        self.atrous_rates = atrous_rates
        self.aspp_dropout_prob = aspp_dropout_prob
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
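A quick sketch of how the dynamic-axes declarations above surface (construction follows the standard `OnnxConfig(config, task)` signature; the printed values are what the properties return):

from transformers import MobileViTConfig
from transformers.models.mobilevit import MobileViTOnnxConfig

onnx_config = MobileViTOnnxConfig(MobileViTConfig(), task="image-classification")
print(onnx_config.inputs)   # OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])
print(onnx_config.outputs)  # OrderedDict([('logits', {0: 'batch'})])
print(onnx_config.atol_for_validation)  # 0.0001
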
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py
ADDED
@@ -0,0 +1,312 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert MobileViT checkpoints from the ml-cvnets library."""


import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTConfig,
    MobileViTForImageClassification,
    MobileViTForSemanticSegmentation,
    MobileViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config

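A quick self-check of what `get_mobilevit_config` yields for the smallest variant (the values follow the branches above; running it needs network access for the id2label download):

config = get_mobilevit_config("mobilevit_xxs")
assert config.hidden_sizes == [64, 80, 96]
assert config.neck_hidden_sizes == [16, 16, 24, 48, 64, 80, 320]
assert config.expand_ratio == 2.0
assert config.num_labels == 1000  # no "deeplabv3_" prefix, so the ImageNet-1k head is used
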
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")

    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
        if "expand_1x1" in name:
            name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
        if "conv_3x3" in name:
            name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
        if "reduce_1x1" in name:
            name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name


def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict

# We will verify our results on an image of cute cats
|
189 |
+
def prepare_img():
|
190 |
+
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
191 |
+
im = Image.open(requests.get(url, stream=True).raw)
|
192 |
+
return im
|
193 |
+
|
194 |
+
|
195 |
+
@torch.no_grad()
|
196 |
+
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
|
197 |
+
"""
|
198 |
+
Copy/paste/tweak model's weights to our MobileViT structure.
|
199 |
+
"""
|
200 |
+
config = get_mobilevit_config(mobilevit_name)
|
201 |
+
|
202 |
+
# load original state_dict
|
203 |
+
state_dict = torch.load(checkpoint_path, map_location="cpu")
|
204 |
+
|
205 |
+
# load 🤗 model
|
206 |
+
if mobilevit_name.startswith("deeplabv3_"):
|
207 |
+
model = MobileViTForSemanticSegmentation(config).eval()
|
208 |
+
else:
|
209 |
+
model = MobileViTForImageClassification(config).eval()
|
210 |
+
|
211 |
+
new_state_dict = convert_state_dict(state_dict, model)
|
212 |
+
model.load_state_dict(new_state_dict)
|
213 |
+
|
214 |
+
# Check outputs on an image, prepared by MobileViTImageProcessor
|
215 |
+
image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
|
216 |
+
encoding = image_processor(images=prepare_img(), return_tensors="pt")
|
217 |
+
outputs = model(**encoding)
|
218 |
+
logits = outputs.logits
|
219 |
+
|
220 |
+
if mobilevit_name.startswith("deeplabv3_"):
|
221 |
+
assert logits.shape == (1, 21, 32, 32)
|
222 |
+
|
223 |
+
if mobilevit_name == "deeplabv3_mobilevit_s":
|
224 |
+
expected_logits = torch.tensor(
|
225 |
+
[
|
226 |
+
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
|
227 |
+
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
|
228 |
+
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
|
229 |
+
]
|
230 |
+
)
|
231 |
+
elif mobilevit_name == "deeplabv3_mobilevit_xs":
|
232 |
+
expected_logits = torch.tensor(
|
233 |
+
[
|
234 |
+
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
|
235 |
+
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
|
236 |
+
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
|
237 |
+
]
|
238 |
+
)
|
239 |
+
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
|
240 |
+
expected_logits = torch.tensor(
|
241 |
+
[
|
242 |
+
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
|
243 |
+
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
|
244 |
+
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
|
245 |
+
]
|
246 |
+
)
|
247 |
+
else:
|
248 |
+
raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
|
249 |
+
|
250 |
+
assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
|
251 |
+
else:
|
252 |
+
assert logits.shape == (1, 1000)
|
253 |
+
|
254 |
+
if mobilevit_name == "mobilevit_s":
|
255 |
+
expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
|
256 |
+
elif mobilevit_name == "mobilevit_xs":
|
257 |
+
expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
|
258 |
+
elif mobilevit_name == "mobilevit_xxs":
|
259 |
+
expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
|
260 |
+
else:
|
261 |
+
raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
|
262 |
+
|
263 |
+
assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
|
264 |
+
|
265 |
+
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
|
266 |
+
print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
|
267 |
+
model.save_pretrained(pytorch_dump_folder_path)
|
268 |
+
print(f"Saving image processor to {pytorch_dump_folder_path}")
|
269 |
+
image_processor.save_pretrained(pytorch_dump_folder_path)
|
270 |
+
|
271 |
+
if push_to_hub:
|
272 |
+
model_mapping = {
|
273 |
+
"mobilevit_s": "mobilevit-small",
|
274 |
+
"mobilevit_xs": "mobilevit-x-small",
|
275 |
+
"mobilevit_xxs": "mobilevit-xx-small",
|
276 |
+
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
|
277 |
+
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
|
278 |
+
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
|
279 |
+
}
|
280 |
+
|
281 |
+
print("Pushing to the hub...")
|
282 |
+
model_name = model_mapping[mobilevit_name]
|
283 |
+
image_processor.push_to_hub(model_name, organization="apple")
|
284 |
+
model.push_to_hub(model_name, organization="apple")
|
285 |
+
|
286 |
+
|
287 |
+
if __name__ == "__main__":
|
288 |
+
parser = argparse.ArgumentParser()
|
289 |
+
# Required parameters
|
290 |
+
parser.add_argument(
|
291 |
+
"--mobilevit_name",
|
292 |
+
default="mobilevit_s",
|
293 |
+
type=str,
|
294 |
+
help=(
|
295 |
+
"Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
|
296 |
+
" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
|
297 |
+
),
|
298 |
+
)
|
299 |
+
parser.add_argument(
|
300 |
+
"--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
|
301 |
+
)
|
302 |
+
parser.add_argument(
|
303 |
+
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
|
304 |
+
)
|
305 |
+
parser.add_argument(
|
306 |
+
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
|
307 |
+
)
|
308 |
+
|
309 |
+
args = parser.parse_args()
|
310 |
+
convert_movilevit_checkpoint(
|
311 |
+
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
|
312 |
+
)
|
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/feature_extraction_mobilevit.py
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
"""Feature extractor class for MobileViT."""
|
16 |
+
|
17 |
+
import warnings
|
18 |
+
|
19 |
+
from ...utils import logging
|
20 |
+
from .image_processing_mobilevit import MobileViTImageProcessor
|
21 |
+
|
22 |
+
|
23 |
+
logger = logging.get_logger(__name__)
|
24 |
+
|
25 |
+
|
26 |
+
class MobileViTFeatureExtractor(MobileViTImageProcessor):
|
27 |
+
def __init__(self, *args, **kwargs) -> None:
|
28 |
+
warnings.warn(
|
29 |
+
"The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
|
30 |
+
" Please use MobileViTImageProcessor instead.",
|
31 |
+
FutureWarning,
|
32 |
+
)
|
33 |
+
super().__init__(*args, **kwargs)
|
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/image_processing_mobilevit.py
ADDED
@@ -0,0 +1,493 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
"""Image processor class for MobileViT."""
|
16 |
+
|
17 |
+
from typing import Dict, List, Optional, Tuple, Union
|
18 |
+
|
19 |
+
import numpy as np
|
20 |
+
|
21 |
+
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
|
22 |
+
from ...image_transforms import flip_channel_order, get_resize_output_image_size, resize, to_channel_dimension_format
|
23 |
+
from ...image_utils import (
|
24 |
+
ChannelDimension,
|
25 |
+
ImageInput,
|
26 |
+
PILImageResampling,
|
27 |
+
infer_channel_dimension_format,
|
28 |
+
is_scaled_image,
|
29 |
+
make_list_of_images,
|
30 |
+
to_numpy_array,
|
31 |
+
valid_images,
|
32 |
+
validate_kwargs,
|
33 |
+
validate_preprocess_arguments,
|
34 |
+
)
|
35 |
+
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
|
36 |
+
|
37 |
+
|
38 |
+
if is_vision_available():
|
39 |
+
import PIL
|
40 |
+
|
41 |
+
if is_torch_available():
|
42 |
+
import torch
|
43 |
+
|
44 |
+
|
45 |
+
logger = logging.get_logger(__name__)
|
46 |
+
|
47 |
+
|
48 |
+
class MobileViTImageProcessor(BaseImageProcessor):
|
49 |
+
r"""
|
50 |
+
Constructs a MobileViT image processor.
|
51 |
+
|
52 |
+
Args:
|
53 |
+
do_resize (`bool`, *optional*, defaults to `True`):
|
54 |
+
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
|
55 |
+
`do_resize` parameter in the `preprocess` method.
|
56 |
+
size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
|
57 |
+
Controls the size of the output image after resizing. Can be overridden by the `size` parameter in the
|
58 |
+
`preprocess` method.
|
59 |
+
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
|
60 |
+
Defines the resampling filter to use if resizing the image. Can be overridden by the `resample` parameter
|
61 |
+
in the `preprocess` method.
|
62 |
+
do_rescale (`bool`, *optional*, defaults to `True`):
|
63 |
+
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
|
64 |
+
parameter in the `preprocess` method.
|
65 |
+
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
|
66 |
+
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
|
67 |
+
`preprocess` method.
|
68 |
+
do_center_crop (`bool`, *optional*, defaults to `True`):
|
69 |
+
Whether to crop the input at the center. If the input size is smaller than `crop_size` along any edge, the
|
70 |
+
image is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in
|
71 |
+
the `preprocess` method.
|
72 |
+
crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
|
73 |
+
Desired output size `(size["height"], size["width"])` when applying center-cropping. Can be overridden by
|
74 |
+
the `crop_size` parameter in the `preprocess` method.
|
75 |
+
do_flip_channel_order (`bool`, *optional*, defaults to `True`):
|
76 |
+
Whether to flip the color channels from RGB to BGR. Can be overridden by the `do_flip_channel_order`
|
77 |
+
parameter in the `preprocess` method.
|
78 |
+
"""
|
79 |
+
|
80 |
+
model_input_names = ["pixel_values"]
|
81 |
+
|
82 |
+
def __init__(
|
83 |
+
self,
|
84 |
+
do_resize: bool = True,
|
85 |
+
size: Dict[str, int] = None,
|
86 |
+
resample: PILImageResampling = PILImageResampling.BILINEAR,
|
87 |
+
do_rescale: bool = True,
|
88 |
+
rescale_factor: Union[int, float] = 1 / 255,
|
89 |
+
do_center_crop: bool = True,
|
90 |
+
crop_size: Dict[str, int] = None,
|
91 |
+
do_flip_channel_order: bool = True,
|
92 |
+
**kwargs,
|
93 |
+
) -> None:
|
94 |
+
super().__init__(**kwargs)
|
95 |
+
size = size if size is not None else {"shortest_edge": 224}
|
96 |
+
size = get_size_dict(size, default_to_square=False)
|
97 |
+
crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
|
98 |
+
crop_size = get_size_dict(crop_size, param_name="crop_size")
|
99 |
+
|
100 |
+
self.do_resize = do_resize
|
101 |
+
self.size = size
|
102 |
+
self.resample = resample
|
103 |
+
self.do_rescale = do_rescale
|
104 |
+
self.rescale_factor = rescale_factor
|
105 |
+
self.do_center_crop = do_center_crop
|
106 |
+
self.crop_size = crop_size
|
107 |
+
self.do_flip_channel_order = do_flip_channel_order
|
108 |
+
self._valid_processor_keys = [
|
109 |
+
"images",
|
110 |
+
"segmentation_maps",
|
111 |
+
"do_resize",
|
112 |
+
"size",
|
113 |
+
"resample",
|
114 |
+
"do_rescale",
|
115 |
+
"rescale_factor",
|
116 |
+
"do_center_crop",
|
117 |
+
"crop_size",
|
118 |
+
"do_flip_channel_order",
|
119 |
+
"return_tensors",
|
120 |
+
"data_format",
|
121 |
+
"input_data_format",
|
122 |
+
]
|
123 |
+
|
124 |
+
# Copied from transformers.models.mobilenet_v1.image_processing_mobilenet_v1.MobileNetV1ImageProcessor.resize with PILImageResampling.BICUBIC->PILImageResampling.BILINEAR
|
125 |
+
def resize(
|
126 |
+
self,
|
127 |
+
image: np.ndarray,
|
128 |
+
size: Dict[str, int],
|
129 |
+
resample: PILImageResampling = PILImageResampling.BILINEAR,
|
130 |
+
data_format: Optional[Union[str, ChannelDimension]] = None,
|
131 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
132 |
+
**kwargs,
|
133 |
+
) -> np.ndarray:
|
134 |
+
"""
|
135 |
+
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
|
136 |
+
resized to keep the input aspect ratio.
|
137 |
+
|
138 |
+
Args:
|
139 |
+
image (`np.ndarray`):
|
140 |
+
Image to resize.
|
141 |
+
size (`Dict[str, int]`):
|
142 |
+
Size of the output image.
|
143 |
+
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
|
144 |
+
Resampling filter to use when resiizing the image.
|
145 |
+
data_format (`str` or `ChannelDimension`, *optional*):
|
146 |
+
The channel dimension format of the image. If not provided, it will be the same as the input image.
|
147 |
+
input_data_format (`ChannelDimension` or `str`, *optional*):
|
148 |
+
The channel dimension format of the input image. If not provided, it will be inferred.
|
149 |
+
"""
|
150 |
+
default_to_square = True
|
151 |
+
if "shortest_edge" in size:
|
152 |
+
size = size["shortest_edge"]
|
153 |
+
default_to_square = False
|
154 |
+
elif "height" in size and "width" in size:
|
155 |
+
size = (size["height"], size["width"])
|
156 |
+
else:
|
157 |
+
raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
|
158 |
+
|
159 |
+
output_size = get_resize_output_image_size(
|
160 |
+
image,
|
161 |
+
size=size,
|
162 |
+
default_to_square=default_to_square,
|
163 |
+
input_data_format=input_data_format,
|
164 |
+
)
|
165 |
+
return resize(
|
166 |
+
image,
|
167 |
+
size=output_size,
|
168 |
+
resample=resample,
|
169 |
+
data_format=data_format,
|
170 |
+
input_data_format=input_data_format,
|
171 |
+
**kwargs,
|
172 |
+
)
|
173 |
+
|
174 |
+
def flip_channel_order(
|
175 |
+
self,
|
176 |
+
image: np.ndarray,
|
177 |
+
data_format: Optional[Union[str, ChannelDimension]] = None,
|
178 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
179 |
+
) -> np.ndarray:
|
180 |
+
"""
|
181 |
+
Flip the color channels from RGB to BGR or vice versa.
|
182 |
+
|
183 |
+
Args:
|
184 |
+
image (`np.ndarray`):
|
185 |
+
The image, represented as a numpy array.
|
186 |
+
data_format (`ChannelDimension` or `str`, *optional*):
|
187 |
+
The channel dimension format of the image. If not provided, it will be the same as the input image.
|
188 |
+
input_data_format (`ChannelDimension` or `str`, *optional*):
|
189 |
+
The channel dimension format of the input image. If not provided, it will be inferred.
|
190 |
+
"""
|
191 |
+
return flip_channel_order(image, data_format=data_format, input_data_format=input_data_format)
|
192 |
+
|
193 |
+
def __call__(self, images, segmentation_maps=None, **kwargs):
|
194 |
+
"""
|
195 |
+
Preprocesses a batch of images and optionally segmentation maps.
|
196 |
+
|
197 |
+
Overrides the `__call__` method of the `Preprocessor` class so that both images and segmentation maps can be
|
198 |
+
passed in as positional arguments.
|
199 |
+
"""
|
200 |
+
return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)
|
201 |
+
|
202 |
+
def _preprocess(
|
203 |
+
self,
|
204 |
+
image: ImageInput,
|
205 |
+
do_resize: bool,
|
206 |
+
do_rescale: bool,
|
207 |
+
do_center_crop: bool,
|
208 |
+
do_flip_channel_order: bool,
|
209 |
+
size: Optional[Dict[str, int]] = None,
|
210 |
+
resample: PILImageResampling = None,
|
211 |
+
rescale_factor: Optional[float] = None,
|
212 |
+
crop_size: Optional[Dict[str, int]] = None,
|
213 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
214 |
+
):
|
215 |
+
if do_resize:
|
216 |
+
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
|
217 |
+
|
218 |
+
if do_rescale:
|
219 |
+
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
|
220 |
+
|
221 |
+
if do_center_crop:
|
222 |
+
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
|
223 |
+
|
224 |
+
if do_flip_channel_order:
|
225 |
+
image = self.flip_channel_order(image, input_data_format=input_data_format)
|
226 |
+
|
227 |
+
return image
|
228 |
+
|
229 |
+
def _preprocess_image(
|
230 |
+
self,
|
231 |
+
image: ImageInput,
|
232 |
+
do_resize: bool = None,
|
233 |
+
size: Dict[str, int] = None,
|
234 |
+
resample: PILImageResampling = None,
|
235 |
+
do_rescale: bool = None,
|
236 |
+
rescale_factor: float = None,
|
237 |
+
do_center_crop: bool = None,
|
238 |
+
crop_size: Dict[str, int] = None,
|
239 |
+
do_flip_channel_order: bool = None,
|
240 |
+
data_format: Optional[Union[str, ChannelDimension]] = None,
|
241 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
242 |
+
) -> np.ndarray:
|
243 |
+
"""Preprocesses a single image."""
|
244 |
+
# All transformations expect numpy arrays.
|
245 |
+
image = to_numpy_array(image)
|
246 |
+
if is_scaled_image(image) and do_rescale:
|
247 |
+
logger.warning_once(
|
248 |
+
"It looks like you are trying to rescale already rescaled images. If the input"
|
249 |
+
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
|
250 |
+
)
|
251 |
+
if input_data_format is None:
|
252 |
+
input_data_format = infer_channel_dimension_format(image)
|
253 |
+
|
254 |
+
image = self._preprocess(
|
255 |
+
image=image,
|
256 |
+
do_resize=do_resize,
|
257 |
+
size=size,
|
258 |
+
resample=resample,
|
259 |
+
do_rescale=do_rescale,
|
260 |
+
rescale_factor=rescale_factor,
|
261 |
+
do_center_crop=do_center_crop,
|
262 |
+
crop_size=crop_size,
|
263 |
+
do_flip_channel_order=do_flip_channel_order,
|
264 |
+
input_data_format=input_data_format,
|
265 |
+
)
|
266 |
+
|
267 |
+
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
|
268 |
+
|
269 |
+
return image
|
270 |
+
|
271 |
+
def _preprocess_mask(
|
272 |
+
self,
|
273 |
+
segmentation_map: ImageInput,
|
274 |
+
do_resize: bool = None,
|
275 |
+
size: Dict[str, int] = None,
|
276 |
+
do_center_crop: bool = None,
|
277 |
+
crop_size: Dict[str, int] = None,
|
278 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
279 |
+
) -> np.ndarray:
|
280 |
+
"""Preprocesses a single mask."""
|
281 |
+
segmentation_map = to_numpy_array(segmentation_map)
|
282 |
+
# Add channel dimension if missing - needed for certain transformations
|
283 |
+
if segmentation_map.ndim == 2:
|
284 |
+
added_channel_dim = True
|
285 |
+
segmentation_map = segmentation_map[None, ...]
|
286 |
+
input_data_format = ChannelDimension.FIRST
|
287 |
+
else:
|
288 |
+
added_channel_dim = False
|
289 |
+
if input_data_format is None:
|
290 |
+
input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
|
291 |
+
|
292 |
+
segmentation_map = self._preprocess(
|
293 |
+
image=segmentation_map,
|
294 |
+
do_resize=do_resize,
|
295 |
+
size=size,
|
296 |
+
resample=PILImageResampling.NEAREST,
|
297 |
+
do_rescale=False,
|
298 |
+
do_center_crop=do_center_crop,
|
299 |
+
crop_size=crop_size,
|
300 |
+
do_flip_channel_order=False,
|
301 |
+
input_data_format=input_data_format,
|
302 |
+
)
|
303 |
+
# Remove extra channel dimension if added for processing
|
304 |
+
if added_channel_dim:
|
305 |
+
segmentation_map = segmentation_map.squeeze(0)
|
306 |
+
segmentation_map = segmentation_map.astype(np.int64)
|
307 |
+
return segmentation_map
|
308 |
+
|
309 |
+
def preprocess(
|
310 |
+
self,
|
311 |
+
images: ImageInput,
|
312 |
+
segmentation_maps: Optional[ImageInput] = None,
|
313 |
+
do_resize: bool = None,
|
314 |
+
size: Dict[str, int] = None,
|
315 |
+
resample: PILImageResampling = None,
|
316 |
+
do_rescale: bool = None,
|
317 |
+
rescale_factor: float = None,
|
318 |
+
do_center_crop: bool = None,
|
319 |
+
crop_size: Dict[str, int] = None,
|
320 |
+
do_flip_channel_order: bool = None,
|
321 |
+
return_tensors: Optional[Union[str, TensorType]] = None,
|
322 |
+
data_format: ChannelDimension = ChannelDimension.FIRST,
|
323 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
324 |
+
**kwargs,
|
325 |
+
) -> PIL.Image.Image:
|
326 |
+
"""
|
327 |
+
Preprocess an image or batch of images.
|
328 |
+
|
329 |
+
Args:
|
330 |
+
images (`ImageInput`):
|
331 |
+
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
|
332 |
+
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
|
333 |
+
segmentation_maps (`ImageInput`, *optional*):
|
334 |
+
Segmentation map to preprocess.
|
335 |
+
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
|
336 |
+
Whether to resize the image.
|
337 |
+
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
|
338 |
+
Size of the image after resizing.
|
339 |
+
resample (`int`, *optional*, defaults to `self.resample`):
|
340 |
+
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only
|
341 |
+
has an effect if `do_resize` is set to `True`.
|
342 |
+
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
|
343 |
+
Whether to rescale the image by rescale factor.
|
344 |
+
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
|
345 |
+
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
|
346 |
+
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
|
347 |
+
Whether to center crop the image.
|
348 |
+
crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
|
349 |
+
Size of the center crop if `do_center_crop` is set to `True`.
|
350 |
+
do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`):
|
351 |
+
Whether to flip the channel order of the image.
|
352 |
+
return_tensors (`str` or `TensorType`, *optional*):
|
353 |
+
The type of tensors to return. Can be one of:
|
354 |
+
- Unset: Return a list of `np.ndarray`.
|
355 |
+
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
|
356 |
+
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
|
357 |
+
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
|
358 |
+
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
|
359 |
+
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
|
360 |
+
The channel dimension format for the output image. Can be one of:
|
361 |
+
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
362 |
+
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
363 |
+
input_data_format (`ChannelDimension` or `str`, *optional*):
|
364 |
+
The channel dimension format for the input image. If unset, the channel dimension format is inferred
|
365 |
+
from the input image. Can be one of:
|
366 |
+
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
367 |
+
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
368 |
+
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
|
369 |
+
"""
|
370 |
+
do_resize = do_resize if do_resize is not None else self.do_resize
|
371 |
+
resample = resample if resample is not None else self.resample
|
372 |
+
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
|
373 |
+
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
|
374 |
+
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
|
375 |
+
do_flip_channel_order = (
|
376 |
+
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
|
377 |
+
)
|
378 |
+
|
379 |
+
size = size if size is not None else self.size
|
380 |
+
size = get_size_dict(size, default_to_square=False)
|
381 |
+
crop_size = crop_size if crop_size is not None else self.crop_size
|
382 |
+
crop_size = get_size_dict(crop_size, param_name="crop_size")
|
383 |
+
|
384 |
+
images = make_list_of_images(images)
|
385 |
+
|
386 |
+
validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
|
387 |
+
|
388 |
+
if segmentation_maps is not None:
|
389 |
+
segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2)
|
390 |
+
|
391 |
+
images = make_list_of_images(images)
|
392 |
+
|
393 |
+
if not valid_images(images):
|
394 |
+
raise ValueError(
|
395 |
+
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
|
396 |
+
"torch.Tensor, tf.Tensor or jax.ndarray."
|
397 |
+
)
|
398 |
+
|
399 |
+
if segmentation_maps is not None and not valid_images(segmentation_maps):
|
400 |
+
raise ValueError(
|
401 |
+
"Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, "
|
402 |
+
"torch.Tensor, tf.Tensor or jax.ndarray."
|
403 |
+
)
|
404 |
+
|
405 |
+
validate_preprocess_arguments(
|
406 |
+
do_rescale=do_rescale,
|
407 |
+
rescale_factor=rescale_factor,
|
408 |
+
do_center_crop=do_center_crop,
|
409 |
+
crop_size=crop_size,
|
410 |
+
do_resize=do_resize,
|
411 |
+
size=size,
|
412 |
+
resample=resample,
|
413 |
+
)
|
414 |
+
|
415 |
+
images = [
|
416 |
+
self._preprocess_image(
|
417 |
+
image=img,
|
418 |
+
do_resize=do_resize,
|
419 |
+
size=size,
|
420 |
+
resample=resample,
|
421 |
+
do_rescale=do_rescale,
|
422 |
+
rescale_factor=rescale_factor,
|
423 |
+
do_center_crop=do_center_crop,
|
424 |
+
crop_size=crop_size,
|
425 |
+
do_flip_channel_order=do_flip_channel_order,
|
426 |
+
data_format=data_format,
|
427 |
+
input_data_format=input_data_format,
|
428 |
+
)
|
429 |
+
for img in images
|
430 |
+
]
|
431 |
+
|
432 |
+
data = {"pixel_values": images}
|
433 |
+
|
434 |
+
if segmentation_maps is not None:
|
435 |
+
segmentation_maps = [
|
436 |
+
self._preprocess_mask(
|
437 |
+
segmentation_map=segmentation_map,
|
438 |
+
do_resize=do_resize,
|
439 |
+
size=size,
|
440 |
+
do_center_crop=do_center_crop,
|
441 |
+
crop_size=crop_size,
|
442 |
+
input_data_format=input_data_format,
|
443 |
+
)
|
444 |
+
for segmentation_map in segmentation_maps
|
445 |
+
]
|
446 |
+
|
447 |
+
data["labels"] = segmentation_maps
|
448 |
+
|
449 |
+
return BatchFeature(data=data, tensor_type=return_tensors)
|
450 |
+
|
451 |
+
# Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.post_process_semantic_segmentation with Beit->MobileViT
|
452 |
+
def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
|
453 |
+
"""
|
454 |
+
Converts the output of [`MobileViTForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.
|
455 |
+
|
456 |
+
Args:
|
457 |
+
outputs ([`MobileViTForSemanticSegmentation`]):
|
458 |
+
Raw outputs of the model.
|
459 |
+
target_sizes (`List[Tuple]` of length `batch_size`, *optional*):
|
460 |
+
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
|
461 |
+
predictions will not be resized.
|
462 |
+
|
463 |
+
Returns:
|
464 |
+
semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic
|
465 |
+
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
|
466 |
+
specified). Each entry of each `torch.Tensor` correspond to a semantic class id.
|
467 |
+
"""
|
468 |
+
# TODO: add support for other frameworks
|
469 |
+
logits = outputs.logits
|
470 |
+
|
471 |
+
# Resize logits and compute semantic segmentation maps
|
472 |
+
if target_sizes is not None:
|
473 |
+
if len(logits) != len(target_sizes):
|
474 |
+
raise ValueError(
|
475 |
+
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
|
476 |
+
)
|
477 |
+
|
478 |
+
if is_torch_tensor(target_sizes):
|
479 |
+
target_sizes = target_sizes.numpy()
|
480 |
+
|
481 |
+
semantic_segmentation = []
|
482 |
+
|
483 |
+
for idx in range(len(logits)):
|
484 |
+
resized_logits = torch.nn.functional.interpolate(
|
485 |
+
logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
|
486 |
+
)
|
487 |
+
semantic_map = resized_logits[0].argmax(dim=0)
|
488 |
+
semantic_segmentation.append(semantic_map)
|
489 |
+
else:
|
490 |
+
semantic_segmentation = logits.argmax(dim=1)
|
491 |
+
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
|
492 |
+
|
493 |
+
return semantic_segmentation
|
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/modeling_mobilevit.py
ADDED
@@ -0,0 +1,1066 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2022 Apple Inc. and The HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
#
|
16 |
+
# Original license: https://github.com/apple/ml-cvnets/blob/main/LICENSE
|
17 |
+
""" PyTorch MobileViT model."""
|
18 |
+
|
19 |
+
|
20 |
+
import math
|
21 |
+
from typing import Dict, Optional, Set, Tuple, Union
|
22 |
+
|
23 |
+
import torch
|
24 |
+
import torch.utils.checkpoint
|
25 |
+
from torch import nn
|
26 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
27 |
+
|
28 |
+
from ...activations import ACT2FN
|
29 |
+
from ...modeling_outputs import (
|
30 |
+
BaseModelOutputWithNoAttention,
|
31 |
+
BaseModelOutputWithPoolingAndNoAttention,
|
32 |
+
ImageClassifierOutputWithNoAttention,
|
33 |
+
SemanticSegmenterOutput,
|
34 |
+
)
|
35 |
+
from ...modeling_utils import PreTrainedModel
|
36 |
+
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
|
37 |
+
from ...utils import (
|
38 |
+
add_code_sample_docstrings,
|
39 |
+
add_start_docstrings,
|
40 |
+
add_start_docstrings_to_model_forward,
|
41 |
+
logging,
|
42 |
+
replace_return_docstrings,
|
43 |
+
)
|
44 |
+
from .configuration_mobilevit import MobileViTConfig
|
45 |
+
|
46 |
+
|
47 |
+
logger = logging.get_logger(__name__)
|
48 |
+
|
49 |
+
|
50 |
+
# General docstring
|
51 |
+
_CONFIG_FOR_DOC = "MobileViTConfig"
|
52 |
+
|
53 |
+
# Base docstring
|
54 |
+
_CHECKPOINT_FOR_DOC = "apple/mobilevit-small"
|
55 |
+
_EXPECTED_OUTPUT_SHAPE = [1, 640, 8, 8]
|
56 |
+
|
57 |
+
# Image classification docstring
|
58 |
+
_IMAGE_CLASS_CHECKPOINT = "apple/mobilevit-small"
|
59 |
+
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
|
60 |
+
|
61 |
+
|
62 |
+
from ..deprecated._archive_maps import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
|
63 |
+
|
64 |
+
|
65 |
+
def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int:
|
66 |
+
"""
|
67 |
+
Ensure that all layers have a channel count that is divisible by `divisor`. This function is taken from the
|
68 |
+
original TensorFlow repo. It can be seen here:
|
69 |
+
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
|
70 |
+
"""
|
71 |
+
if min_value is None:
|
72 |
+
min_value = divisor
|
73 |
+
new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
|
74 |
+
# Make sure that round down does not go down by more than 10%.
|
75 |
+
if new_value < 0.9 * value:
|
76 |
+
new_value += divisor
|
77 |
+
return int(new_value)
|
78 |
+
|
79 |
+
|
80 |
+
class MobileViTConvLayer(nn.Module):
|
81 |
+
def __init__(
|
82 |
+
self,
|
83 |
+
config: MobileViTConfig,
|
84 |
+
in_channels: int,
|
85 |
+
out_channels: int,
|
86 |
+
kernel_size: int,
|
87 |
+
stride: int = 1,
|
88 |
+
groups: int = 1,
|
89 |
+
bias: bool = False,
|
90 |
+
dilation: int = 1,
|
91 |
+
use_normalization: bool = True,
|
92 |
+
use_activation: Union[bool, str] = True,
|
93 |
+
) -> None:
|
94 |
+
super().__init__()
|
95 |
+
padding = int((kernel_size - 1) / 2) * dilation
|
96 |
+
|
97 |
+
if in_channels % groups != 0:
|
98 |
+
raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
|
99 |
+
if out_channels % groups != 0:
|
100 |
+
raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")
|
101 |
+
|
102 |
+
self.convolution = nn.Conv2d(
|
103 |
+
in_channels=in_channels,
|
104 |
+
out_channels=out_channels,
|
105 |
+
kernel_size=kernel_size,
|
106 |
+
stride=stride,
|
107 |
+
padding=padding,
|
108 |
+
dilation=dilation,
|
109 |
+
groups=groups,
|
110 |
+
bias=bias,
|
111 |
+
padding_mode="zeros",
|
112 |
+
)
|
113 |
+
|
114 |
+
if use_normalization:
|
115 |
+
self.normalization = nn.BatchNorm2d(
|
116 |
+
num_features=out_channels,
|
117 |
+
eps=1e-5,
|
118 |
+
momentum=0.1,
|
119 |
+
affine=True,
|
120 |
+
track_running_stats=True,
|
121 |
+
)
|
122 |
+
else:
|
123 |
+
self.normalization = None
|
124 |
+
|
125 |
+
if use_activation:
|
126 |
+
if isinstance(use_activation, str):
|
127 |
+
self.activation = ACT2FN[use_activation]
|
128 |
+
elif isinstance(config.hidden_act, str):
|
129 |
+
self.activation = ACT2FN[config.hidden_act]
|
130 |
+
else:
|
131 |
+
self.activation = config.hidden_act
|
132 |
+
else:
|
133 |
+
self.activation = None
|
134 |
+
|
135 |
+
def forward(self, features: torch.Tensor) -> torch.Tensor:
|
136 |
+
features = self.convolution(features)
|
137 |
+
if self.normalization is not None:
|
138 |
+
features = self.normalization(features)
|
139 |
+
if self.activation is not None:
|
140 |
+
features = self.activation(features)
|
141 |
+
return features
|
142 |
+
|
143 |
+
|
144 |
+
class MobileViTInvertedResidual(nn.Module):
|
145 |
+
"""
|
146 |
+
Inverted residual block (MobileNetv2): https://arxiv.org/abs/1801.04381
|
147 |
+
"""
|
148 |
+
|
149 |
+
def __init__(
|
150 |
+
self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int, dilation: int = 1
|
151 |
+
) -> None:
|
152 |
+
super().__init__()
|
153 |
+
expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), 8)
|
154 |
+
|
155 |
+
if stride not in [1, 2]:
|
156 |
+
raise ValueError(f"Invalid stride {stride}.")
|
157 |
+
|
158 |
+
self.use_residual = (stride == 1) and (in_channels == out_channels)
|
159 |
+
|
160 |
+
self.expand_1x1 = MobileViTConvLayer(
|
161 |
+
config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1
|
162 |
+
)
|
163 |
+
|
164 |
+
self.conv_3x3 = MobileViTConvLayer(
|
165 |
+
config,
|
166 |
+
in_channels=expanded_channels,
|
167 |
+
out_channels=expanded_channels,
|
168 |
+
kernel_size=3,
|
169 |
+
stride=stride,
|
170 |
+
groups=expanded_channels,
|
171 |
+
dilation=dilation,
|
172 |
+
)
|
173 |
+
|
174 |
+
self.reduce_1x1 = MobileViTConvLayer(
|
175 |
+
config,
|
176 |
+
in_channels=expanded_channels,
|
177 |
+
out_channels=out_channels,
|
178 |
+
kernel_size=1,
|
179 |
+
use_activation=False,
|
180 |
+
)
|
181 |
+
|
182 |
+
def forward(self, features: torch.Tensor) -> torch.Tensor:
|
183 |
+
residual = features
|
184 |
+
|
185 |
+
features = self.expand_1x1(features)
|
186 |
+
features = self.conv_3x3(features)
|
187 |
+
features = self.reduce_1x1(features)
|
188 |
+
|
189 |
+
return residual + features if self.use_residual else features
|
190 |
+
|
191 |
+
|
192 |
+
class MobileViTMobileNetLayer(nn.Module):
|
193 |
+
def __init__(
|
194 |
+
self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int = 1, num_stages: int = 1
|
195 |
+
) -> None:
|
196 |
+
super().__init__()
|
197 |
+
|
198 |
+
self.layer = nn.ModuleList()
|
199 |
+
for i in range(num_stages):
|
200 |
+
layer = MobileViTInvertedResidual(
|
201 |
+
config,
|
202 |
+
in_channels=in_channels,
|
203 |
+
out_channels=out_channels,
|
204 |
+
stride=stride if i == 0 else 1,
|
205 |
+
)
|
206 |
+
self.layer.append(layer)
|
207 |
+
in_channels = out_channels
|
208 |
+
|
209 |
+
def forward(self, features: torch.Tensor) -> torch.Tensor:
|
210 |
+
for layer_module in self.layer:
|
211 |
+
features = layer_module(features)
|
212 |
+
return features
|
213 |
+
|
214 |
+
|
215 |
+
class MobileViTSelfAttention(nn.Module):
|
216 |
+
def __init__(self, config: MobileViTConfig, hidden_size: int) -> None:
|
217 |
+
super().__init__()
|
218 |
+
|
219 |
+
if hidden_size % config.num_attention_heads != 0:
|
220 |
+
raise ValueError(
|
221 |
+
f"The hidden size {hidden_size,} is not a multiple of the number of attention "
|
222 |
+
f"heads {config.num_attention_heads}."
|
223 |
+
)
|
224 |
+
|
225 |
+
self.num_attention_heads = config.num_attention_heads
|
226 |
+
self.attention_head_size = int(hidden_size / config.num_attention_heads)
|
227 |
+
self.all_head_size = self.num_attention_heads * self.attention_head_size
|
228 |
+
|
229 |
+
self.query = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
|
230 |
+
self.key = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
|
231 |
+
self.value = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
|
232 |
+
|
233 |
+
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
|
234 |
+
|
235 |
+
def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
|
236 |
+
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
|
237 |
+
x = x.view(*new_x_shape)
|
238 |
+
return x.permute(0, 2, 1, 3)
|
239 |
+
|
240 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
241 |
+
mixed_query_layer = self.query(hidden_states)
|
242 |
+
|
243 |
+
key_layer = self.transpose_for_scores(self.key(hidden_states))
|
244 |
+
value_layer = self.transpose_for_scores(self.value(hidden_states))
|
245 |
+
query_layer = self.transpose_for_scores(mixed_query_layer)
|
246 |
+
|
247 |
+
# Take the dot product between "query" and "key" to get the raw attention scores.
|
248 |
+
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
|
249 |
+
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
|
250 |
+
|
251 |
+
# Normalize the attention scores to probabilities.
|
252 |
+
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
|
253 |
+
|
254 |
+
# This is actually dropping out entire tokens to attend to, which might
|
255 |
+
# seem a bit unusual, but is taken from the original Transformer paper.
|
256 |
+
attention_probs = self.dropout(attention_probs)
|
257 |
+
|
258 |
+
context_layer = torch.matmul(attention_probs, value_layer)
|
259 |
+
|
260 |
+
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
|
261 |
+
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
|
262 |
+
context_layer = context_layer.view(*new_context_layer_shape)
|
263 |
+
return context_layer
|
264 |
+
|
265 |
+
|
266 |
+
class MobileViTSelfOutput(nn.Module):
|
267 |
+
def __init__(self, config: MobileViTConfig, hidden_size: int) -> None:
|
268 |
+
super().__init__()
|
269 |
+
self.dense = nn.Linear(hidden_size, hidden_size)
|
270 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
271 |
+
|
272 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
273 |
+
hidden_states = self.dense(hidden_states)
|
274 |
+
hidden_states = self.dropout(hidden_states)
|
275 |
+
return hidden_states
|
276 |
+
|
277 |
+
|
278 |
+
class MobileViTAttention(nn.Module):
|
279 |
+
def __init__(self, config: MobileViTConfig, hidden_size: int) -> None:
|
280 |
+
super().__init__()
|
281 |
+
self.attention = MobileViTSelfAttention(config, hidden_size)
|
282 |
+
self.output = MobileViTSelfOutput(config, hidden_size)
|
283 |
+
self.pruned_heads = set()
|
284 |
+
|
285 |
+
def prune_heads(self, heads: Set[int]) -> None:
|
286 |
+
if len(heads) == 0:
|
287 |
+
return
|
288 |
+
heads, index = find_pruneable_heads_and_indices(
|
289 |
+
heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
|
290 |
+
)
|
291 |
+
|
292 |
+
# Prune linear layers
|
293 |
+
self.attention.query = prune_linear_layer(self.attention.query, index)
|
294 |
+
self.attention.key = prune_linear_layer(self.attention.key, index)
|
295 |
+
self.attention.value = prune_linear_layer(self.attention.value, index)
|
296 |
+
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
|
297 |
+
|
298 |
+
# Update hyper params and store pruned heads
|
299 |
+
self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
|
300 |
+
self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
|
301 |
+
self.pruned_heads = self.pruned_heads.union(heads)
|
302 |
+
|
303 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
304 |
+
self_outputs = self.attention(hidden_states)
|
305 |
+
attention_output = self.output(self_outputs)
|
306 |
+
return attention_output
|
307 |
+
|
308 |
+
|
309 |
+
class MobileViTIntermediate(nn.Module):
|
310 |
+
def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None:
|
311 |
+
super().__init__()
|
312 |
+
self.dense = nn.Linear(hidden_size, intermediate_size)
|
313 |
+
if isinstance(config.hidden_act, str):
|
314 |
+
self.intermediate_act_fn = ACT2FN[config.hidden_act]
|
315 |
+
else:
|
316 |
+
self.intermediate_act_fn = config.hidden_act
|
317 |
+
|
318 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
319 |
+
hidden_states = self.dense(hidden_states)
|
320 |
+
hidden_states = self.intermediate_act_fn(hidden_states)
|
321 |
+
return hidden_states
|
322 |
+
|
323 |
+
|
324 |
+
class MobileViTOutput(nn.Module):
|
325 |
+
def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None:
|
326 |
+
super().__init__()
|
327 |
+
self.dense = nn.Linear(intermediate_size, hidden_size)
|
328 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
329 |
+
|
330 |
+
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
|
331 |
+
hidden_states = self.dense(hidden_states)
|
332 |
+
hidden_states = self.dropout(hidden_states)
|
333 |
+
hidden_states = hidden_states + input_tensor
|
334 |
+
return hidden_states
|
335 |
+
|
336 |
+
|
337 |
+
class MobileViTTransformerLayer(nn.Module):
|
338 |
+
def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None:
|
339 |
+
super().__init__()
|
340 |
+
self.attention = MobileViTAttention(config, hidden_size)
|
341 |
+
self.intermediate = MobileViTIntermediate(config, hidden_size, intermediate_size)
|
342 |
+
self.output = MobileViTOutput(config, hidden_size, intermediate_size)
|
343 |
+
self.layernorm_before = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
|
344 |
+
self.layernorm_after = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
|
345 |
+
|
346 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
347 |
+
attention_output = self.attention(self.layernorm_before(hidden_states))
|
348 |
+
hidden_states = attention_output + hidden_states
|
349 |
+
|
350 |
+
layer_output = self.layernorm_after(hidden_states)
|
351 |
+
layer_output = self.intermediate(layer_output)
|
352 |
+
layer_output = self.output(layer_output, hidden_states)
|
353 |
+
return layer_output
|
354 |
+
|
355 |
+
|
356 |
+
class MobileViTTransformer(nn.Module):
|
357 |
+
def __init__(self, config: MobileViTConfig, hidden_size: int, num_stages: int) -> None:
|
358 |
+
super().__init__()
|
359 |
+
|
360 |
+
self.layer = nn.ModuleList()
|
361 |
+
for _ in range(num_stages):
|
362 |
+
transformer_layer = MobileViTTransformerLayer(
|
363 |
+
config,
|
364 |
+
hidden_size=hidden_size,
|
365 |
+
intermediate_size=int(hidden_size * config.mlp_ratio),
|
366 |
+
)
|
367 |
+
self.layer.append(transformer_layer)
|
368 |
+
|
369 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
370 |
+
for layer_module in self.layer:
|
371 |
+
hidden_states = layer_module(hidden_states)
|
372 |
+
return hidden_states
|
373 |
+
|
374 |
+
|
375 |
+
class MobileViTLayer(nn.Module):
|
376 |
+
"""
|
377 |
+
MobileViT block: https://arxiv.org/abs/2110.02178
|
378 |
+
"""
|
379 |
+
|
380 |
+
def __init__(
|
381 |
+
self,
|
382 |
+
config: MobileViTConfig,
|
383 |
+
in_channels: int,
|
384 |
+
out_channels: int,
|
385 |
+
stride: int,
|
386 |
+
hidden_size: int,
|
387 |
+
num_stages: int,
|
388 |
+
dilation: int = 1,
|
389 |
+
) -> None:
|
390 |
+
super().__init__()
|
391 |
+
self.patch_width = config.patch_size
|
392 |
+
self.patch_height = config.patch_size
|
393 |
+
|
394 |
+
if stride == 2:
|
395 |
+
self.downsampling_layer = MobileViTInvertedResidual(
|
396 |
+
config,
|
397 |
+
in_channels=in_channels,
|
398 |
+
out_channels=out_channels,
|
399 |
+
stride=stride if dilation == 1 else 1,
|
400 |
+
dilation=dilation // 2 if dilation > 1 else 1,
|
401 |
+
)
|
402 |
+
in_channels = out_channels
|
403 |
+
else:
|
404 |
+
self.downsampling_layer = None
|
405 |
+
|
406 |
+
self.conv_kxk = MobileViTConvLayer(
|
407 |
+
config,
|
408 |
+
in_channels=in_channels,
|
409 |
+
out_channels=in_channels,
|
410 |
+
kernel_size=config.conv_kernel_size,
|
411 |
+
)
|
412 |
+
|
413 |
+
self.conv_1x1 = MobileViTConvLayer(
|
414 |
+
config,
|
415 |
+
in_channels=in_channels,
|
416 |
+
out_channels=hidden_size,
|
417 |
+
kernel_size=1,
|
418 |
+
use_normalization=False,
|
419 |
+
use_activation=False,
|
420 |
+
)
|
421 |
+
|
422 |
+
self.transformer = MobileViTTransformer(
|
423 |
+
config,
|
424 |
+
hidden_size=hidden_size,
|
425 |
+
num_stages=num_stages,
|
426 |
+
)
|
427 |
+
|
428 |
+
self.layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
|
429 |
+
|
430 |
+
self.conv_projection = MobileViTConvLayer(
|
431 |
+
config, in_channels=hidden_size, out_channels=in_channels, kernel_size=1
|
432 |
+
)
|
433 |
+
|
434 |
+
self.fusion = MobileViTConvLayer(
|
435 |
+
config, in_channels=2 * in_channels, out_channels=in_channels, kernel_size=config.conv_kernel_size
|
436 |
+
)
|
437 |
+
|
438 |
+
def unfolding(self, features: torch.Tensor) -> Tuple[torch.Tensor, Dict]:
|
439 |
+
patch_width, patch_height = self.patch_width, self.patch_height
|
440 |
+
patch_area = int(patch_width * patch_height)
|
441 |
+
|
442 |
+
batch_size, channels, orig_height, orig_width = features.shape
|
443 |
+
|
444 |
+
new_height = int(math.ceil(orig_height / patch_height) * patch_height)
|
445 |
+
new_width = int(math.ceil(orig_width / patch_width) * patch_width)
|
446 |
+
|
447 |
+
interpolate = False
|
448 |
+
if new_width != orig_width or new_height != orig_height:
|
449 |
+
# Note: Padding can be done, but then it needs to be handled in attention function.
|
450 |
+
features = nn.functional.interpolate(
|
451 |
+
features, size=(new_height, new_width), mode="bilinear", align_corners=False
|
452 |
+
)
|
453 |
+
interpolate = True
|
454 |
+
|
455 |
+
# number of patches along width and height
|
456 |
+
num_patch_width = new_width // patch_width
|
457 |
+
num_patch_height = new_height // patch_height
|
458 |
+
num_patches = num_patch_height * num_patch_width
|
459 |
+
|
460 |
+
# convert from shape (batch_size, channels, orig_height, orig_width)
|
461 |
+
# to the shape (batch_size * patch_area, num_patches, channels)
|
462 |
+
patches = features.reshape(
|
463 |
+
batch_size * channels * num_patch_height, patch_height, num_patch_width, patch_width
|
464 |
+
)
|
465 |
+
patches = patches.transpose(1, 2)
|
466 |
+
patches = patches.reshape(batch_size, channels, num_patches, patch_area)
|
467 |
+
patches = patches.transpose(1, 3)
|
468 |
+
patches = patches.reshape(batch_size * patch_area, num_patches, -1)
|
469 |
+
|
470 |
+
info_dict = {
|
471 |
+
"orig_size": (orig_height, orig_width),
|
472 |
+
"batch_size": batch_size,
|
473 |
+
"channels": channels,
|
474 |
+
"interpolate": interpolate,
|
475 |
+
"num_patches": num_patches,
|
476 |
+
"num_patches_width": num_patch_width,
|
477 |
+
"num_patches_height": num_patch_height,
|
478 |
+
}
|
479 |
+
return patches, info_dict
|
480 |
+
|
481 |
+
    def folding(self, patches: torch.Tensor, info_dict: Dict) -> torch.Tensor:
        patch_width, patch_height = self.patch_width, self.patch_height
        patch_area = int(patch_width * patch_height)

        batch_size = info_dict["batch_size"]
        channels = info_dict["channels"]
        num_patches = info_dict["num_patches"]
        num_patch_height = info_dict["num_patches_height"]
        num_patch_width = info_dict["num_patches_width"]

        # convert from shape (batch_size * patch_area, num_patches, channels)
        # back to shape (batch_size, channels, orig_height, orig_width)
        features = patches.contiguous().view(batch_size, patch_area, num_patches, -1)
        features = features.transpose(1, 3)
        features = features.reshape(
            batch_size * channels * num_patch_height, num_patch_width, patch_height, patch_width
        )
        features = features.transpose(1, 2)
        features = features.reshape(
            batch_size, channels, num_patch_height * patch_height, num_patch_width * patch_width
        )

        if info_dict["interpolate"]:
            features = nn.functional.interpolate(
                features, size=info_dict["orig_size"], mode="bilinear", align_corners=False
            )

        return features

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        # reduce spatial dimensions if needed
        if self.downsampling_layer:
            features = self.downsampling_layer(features)

        residual = features

        # local representation
        features = self.conv_kxk(features)
        features = self.conv_1x1(features)

        # convert feature map to patches
        patches, info_dict = self.unfolding(features)

        # learn global representations
        patches = self.transformer(patches)
        patches = self.layernorm(patches)

        # convert patches back to feature maps
        features = self.folding(patches, info_dict)

        features = self.conv_projection(features)
        features = self.fusion(torch.cat((residual, features), dim=1))
        return features


class MobileViTEncoder(nn.Module):
    def __init__(self, config: MobileViTConfig) -> None:
        super().__init__()
        self.config = config

        self.layer = nn.ModuleList()
        self.gradient_checkpointing = False

        # segmentation architectures like DeepLab and PSPNet modify the strides
        # of the classification backbones
        dilate_layer_4 = dilate_layer_5 = False
        if config.output_stride == 8:
            dilate_layer_4 = True
            dilate_layer_5 = True
        elif config.output_stride == 16:
            dilate_layer_5 = True

        dilation = 1

        layer_1 = MobileViTMobileNetLayer(
            config,
            in_channels=config.neck_hidden_sizes[0],
            out_channels=config.neck_hidden_sizes[1],
            stride=1,
            num_stages=1,
        )
        self.layer.append(layer_1)

        layer_2 = MobileViTMobileNetLayer(
            config,
            in_channels=config.neck_hidden_sizes[1],
            out_channels=config.neck_hidden_sizes[2],
            stride=2,
            num_stages=3,
        )
        self.layer.append(layer_2)

        layer_3 = MobileViTLayer(
            config,
            in_channels=config.neck_hidden_sizes[2],
            out_channels=config.neck_hidden_sizes[3],
            stride=2,
            hidden_size=config.hidden_sizes[0],
            num_stages=2,
        )
        self.layer.append(layer_3)

        if dilate_layer_4:
            dilation *= 2

        layer_4 = MobileViTLayer(
            config,
            in_channels=config.neck_hidden_sizes[3],
            out_channels=config.neck_hidden_sizes[4],
            stride=2,
            hidden_size=config.hidden_sizes[1],
            num_stages=4,
            dilation=dilation,
        )
        self.layer.append(layer_4)

        if dilate_layer_5:
            dilation *= 2

        layer_5 = MobileViTLayer(
            config,
            in_channels=config.neck_hidden_sizes[4],
            out_channels=config.neck_hidden_sizes[5],
            stride=2,
            hidden_size=config.hidden_sizes[2],
            num_stages=3,
            dilation=dilation,
        )
        self.layer.append(layer_5)

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutputWithNoAttention]:
        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            if self.gradient_checkpointing and self.training:
                hidden_states = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                )
            else:
                hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)


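# How `output_stride` plays out above (a sketch, assuming the default config values): at
# output_stride=32 every stage keeps stride=2; at output_stride=16 only layer_5 trades its
# stride for dilation=2; at output_stride=8 layer_4 (dilation=2) and layer_5 (dilation=4)
# both do, leaving the final feature map 4x larger per spatial dimension for the
# DeepLab-style segmentation head.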
class MobileViTPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileViTConfig
    base_model_prefix = "mobilevit"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


MOBILEVIT_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileViTConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILEVIT_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileViTImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare MobileViT model outputting raw hidden-states without any specific head on top.",
    MOBILEVIT_START_DOCSTRING,
)
class MobileViTModel(MobileViTPreTrainedModel):
    def __init__(self, config: MobileViTConfig, expand_output: bool = True):
        super().__init__(config)
        self.config = config
        self.expand_output = expand_output

        self.conv_stem = MobileViTConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=config.neck_hidden_sizes[0],
            kernel_size=3,
            stride=2,
        )

        self.encoder = MobileViTEncoder(config)

        if self.expand_output:
            self.conv_1x1_exp = MobileViTConvLayer(
                config,
                in_channels=config.neck_hidden_sizes[5],
                out_channels=config.neck_hidden_sizes[6],
                kernel_size=1,
            )

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model.
        heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel
        """
        for layer_index, heads in heads_to_prune.items():
            mobilevit_layer = self.encoder.layer[layer_index]
            if isinstance(mobilevit_layer, MobileViTLayer):
                for transformer_layer in mobilevit_layer.transformer.layer:
                    transformer_layer.attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output = self.conv_stem(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if self.expand_output:
            last_hidden_state = self.conv_1x1_exp(encoder_outputs[0])

            # global average pooling: (batch_size, channels, height, width) -> (batch_size, channels)
            pooled_output = torch.mean(last_hidden_state, dim=[-2, -1], keepdim=False)
        else:
            last_hidden_state = encoder_outputs[0]
            pooled_output = None

        if not return_dict:
            output = (last_hidden_state, pooled_output) if pooled_output is not None else (last_hidden_state,)
            return output + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


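# A minimal usage sketch (illustrative; `image` is assumed to be a PIL image):
#
#     from transformers import AutoImageProcessor, MobileViTModel
#
#     processor = AutoImageProcessor.from_pretrained("apple/mobilevit-small")
#     model = MobileViTModel.from_pretrained("apple/mobilevit-small")
#     inputs = processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)
#     # outputs.last_hidden_state has shape (1, 640, 8, 8) for a 256x256 crop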
@add_start_docstrings(
    """
    MobileViT model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILEVIT_START_DOCSTRING,
)
class MobileViTForImageClassification(MobileViTPreTrainedModel):
    def __init__(self, config: MobileViTConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilevit = MobileViTModel(config)

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = (
            nn.Linear(config.neck_hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilevit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )


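# A minimal fine-tuning sketch (the checkpoint reuse and num_labels=2 are assumptions for
# illustration): the problem_type branching above means integer labels of shape (batch_size,)
# trigger cross-entropy, while float labels of shape (batch_size, num_labels) trigger BCE.
#
#     model = MobileViTForImageClassification.from_pretrained(
#         "apple/mobilevit-small", num_labels=2, ignore_mismatched_sizes=True
#     )
#     outputs = model(pixel_values=pixel_values, labels=torch.tensor([0]))
#     outputs.loss.backward()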
class MobileViTASPPPooling(nn.Module):
    def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int) -> None:
        super().__init__()

        self.global_pool = nn.AdaptiveAvgPool2d(output_size=1)

        self.conv_1x1 = MobileViTConvLayer(
            config,
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            stride=1,
            use_normalization=True,
            use_activation="relu",
        )

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        spatial_size = features.shape[-2:]
        features = self.global_pool(features)
        features = self.conv_1x1(features)
        features = nn.functional.interpolate(features, size=spatial_size, mode="bilinear", align_corners=False)
        return features


class MobileViTASPP(nn.Module):
    """
    ASPP module defined in DeepLab papers: https://arxiv.org/abs/1606.00915, https://arxiv.org/abs/1706.05587
    """

    def __init__(self, config: MobileViTConfig) -> None:
        super().__init__()

        in_channels = config.neck_hidden_sizes[-2]
        out_channels = config.aspp_out_channels

        if len(config.atrous_rates) != 3:
            raise ValueError("Expected 3 values for atrous_rates")

        self.convs = nn.ModuleList()

        in_projection = MobileViTConvLayer(
            config,
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            use_activation="relu",
        )
        self.convs.append(in_projection)

        self.convs.extend(
            [
                MobileViTConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=3,
                    dilation=rate,
                    use_activation="relu",
                )
                for rate in config.atrous_rates
            ]
        )

        pool_layer = MobileViTASPPPooling(config, in_channels, out_channels)
        self.convs.append(pool_layer)

        self.project = MobileViTConvLayer(
            config, in_channels=5 * out_channels, out_channels=out_channels, kernel_size=1, use_activation="relu"
        )

        self.dropout = nn.Dropout(p=config.aspp_dropout_prob)

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        pyramid = []
        for conv in self.convs:
            pyramid.append(conv(features))
        pyramid = torch.cat(pyramid, dim=1)

        pooled_features = self.project(pyramid)
        pooled_features = self.dropout(pooled_features)
        return pooled_features


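# Why `5 * out_channels` above: the pyramid concatenates five parallel branches -- the 1x1
# in-projection, one 3x3 conv per atrous rate (exactly three rates are required), and the
# global pooling branch -- each emitting `out_channels` maps before the final 1x1 projection.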
class MobileViTDeepLabV3(nn.Module):
    """
    DeepLabv3 architecture: https://arxiv.org/abs/1706.05587
    """

    def __init__(self, config: MobileViTConfig) -> None:
        super().__init__()
        self.aspp = MobileViTASPP(config)

        self.dropout = nn.Dropout2d(config.classifier_dropout_prob)

        self.classifier = MobileViTConvLayer(
            config,
            in_channels=config.aspp_out_channels,
            out_channels=config.num_labels,
            kernel_size=1,
            use_normalization=False,
            use_activation=False,
            bias=True,
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        features = self.aspp(hidden_states[-1])
        features = self.dropout(features)
        features = self.classifier(features)
        return features


@add_start_docstrings(
    """
    MobileViT model with a semantic segmentation head on top, e.g. for Pascal VOC.
    """,
    MOBILEVIT_START_DOCSTRING,
)
class MobileViTForSemanticSegmentation(MobileViTPreTrainedModel):
    def __init__(self, config: MobileViTConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilevit = MobileViTModel(config, expand_output=False)
        self.segmentation_head = MobileViTDeepLabV3(config)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
            Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).

        Returns:

        Examples:

        ```python
        >>> import requests
        >>> import torch
        >>> from PIL import Image
        >>> from transformers import AutoImageProcessor, MobileViTForSemanticSegmentation

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small")
        >>> model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small")

        >>> inputs = image_processor(images=image, return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)

        >>> # logits are of shape (batch_size, num_labels, height, width)
        >>> logits = outputs.logits
        ```"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilevit(
            pixel_values,
            output_hidden_states=True,  # we need the intermediate hidden states
            return_dict=return_dict,
        )

        encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]

        logits = self.segmentation_head(encoder_hidden_states)

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # upsample logits to the images' original size
                upsampled_logits = nn.functional.interpolate(
                    logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
                )
                loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
                loss = loss_fct(upsampled_logits, labels)

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
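
# A minimal post-processing sketch (illustrative only; `outputs` and `image` are assumed to
# come from the docstring example above). The logits are at reduced resolution, so upsample
# and argmax to obtain a per-pixel segmentation map:
#
#     upsampled = torch.nn.functional.interpolate(
#         outputs.logits, size=image.size[::-1], mode="bilinear", align_corners=False
#     )
#     segmentation_map = upsampled.argmax(dim=1)[0]  # (height, width) label ids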
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilevit/modeling_tf_mobilevit.py
ADDED
@@ -0,0 +1,1373 @@
# coding=utf-8
# Copyright 2022 Apple Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Original license: https://github.com/apple/ml-cvnets/blob/main/LICENSE
""" TensorFlow 2.0 MobileViT model."""

from __future__ import annotations

from typing import Dict, Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import get_tf_activation
from ...file_utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    replace_return_docstrings,
)
from ...modeling_tf_outputs import (
    TFBaseModelOutput,
    TFBaseModelOutputWithPooling,
    TFImageClassifierOutputWithNoAttention,
    TFSemanticSegmenterOutputWithNoAttention,
)
from ...modeling_tf_utils import (
    TFPreTrainedModel,
    TFSequenceClassificationLoss,
    keras,
    keras_serializable,
    unpack_inputs,
)
from ...tf_utils import shape_list, stable_softmax
from ...utils import logging
from .configuration_mobilevit import MobileViTConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileViTConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "apple/mobilevit-small"
_EXPECTED_OUTPUT_SHAPE = [1, 640, 8, 8]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "apple/mobilevit-small"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


from ..deprecated._archive_maps import TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int:
    """
    Ensure that all layers have a channel count that is divisible by `divisor`. This function is taken from the
    original TensorFlow repo. It can be seen here:
    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
    """
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Make sure that round down does not go down by more than 10%.
    if new_value < 0.9 * value:
        new_value += divisor
    return int(new_value)


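# Hand-computed examples of the rounding rule above (illustrative):
#     make_divisible(53) -> 56   # nearest multiple of 8
#     make_divisible(96) -> 96   # already divisible, returned unchanged
#     make_divisible(10) -> 16   # 8 would undershoot 10 by more than 10%, so bump up a step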
class TFMobileViTConvLayer(keras.layers.Layer):
    def __init__(
        self,
        config: MobileViTConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        dilation: int = 1,
        use_normalization: bool = True,
        use_activation: Union[bool, str] = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        logger.warning(
            f"\n{self.__class__.__name__} has backpropagation operations that are NOT supported on CPU. If you wish "
            "to train/fine-tune this model, you need a GPU or a TPU"
        )

        padding = int((kernel_size - 1) / 2) * dilation
        self.padding = keras.layers.ZeroPadding2D(padding)

        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        self.convolution = keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            dilation_rate=dilation,
            groups=groups,
            use_bias=bias,
            name="convolution",
        )

        if use_normalization:
            self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.1, name="normalization")
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = get_tf_activation(use_activation)
            elif isinstance(config.hidden_act, str):
                self.activation = get_tf_activation(config.hidden_act)
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None
        self.in_channels = in_channels
        self.out_channels = out_channels

    def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor:
        padded_features = self.padding(features)
        features = self.convolution(padded_features)
        if self.normalization is not None:
            features = self.normalization(features, training=training)
        if self.activation is not None:
            features = self.activation(features)
        return features

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "convolution", None) is not None:
            with tf.name_scope(self.convolution.name):
                self.convolution.build([None, None, None, self.in_channels])
        if getattr(self, "normalization", None) is not None:
            if hasattr(self.normalization, "name"):
                with tf.name_scope(self.normalization.name):
                    self.normalization.build([None, None, None, self.out_channels])


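# Padding sanity check for the layer above (hand-computed): the explicit ZeroPadding2D
# reproduces "same" behavior for odd kernels, e.g. kernel_size=3, dilation=1 -> pad 1 and
# kernel_size=3, dilation=2 -> pad 2, so spatial size is preserved whenever stride == 1.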
class TFMobileViTInvertedResidual(keras.layers.Layer):
    """
    Inverted residual block (MobileNetv2): https://arxiv.org/abs/1801.04381
    """

    def __init__(
        self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int, dilation: int = 1, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), 8)

        if stride not in [1, 2]:
            raise ValueError(f"Invalid stride {stride}.")

        self.use_residual = (stride == 1) and (in_channels == out_channels)

        self.expand_1x1 = TFMobileViTConvLayer(
            config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1, name="expand_1x1"
        )

        self.conv_3x3 = TFMobileViTConvLayer(
            config,
            in_channels=expanded_channels,
            out_channels=expanded_channels,
            kernel_size=3,
            stride=stride,
            groups=expanded_channels,
            dilation=dilation,
            name="conv_3x3",
        )

        self.reduce_1x1 = TFMobileViTConvLayer(
            config,
            in_channels=expanded_channels,
            out_channels=out_channels,
            kernel_size=1,
            use_activation=False,
            name="reduce_1x1",
        )

    def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor:
        residual = features

        features = self.expand_1x1(features, training=training)
        features = self.conv_3x3(features, training=training)
        features = self.reduce_1x1(features, training=training)

        return residual + features if self.use_residual else features

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "expand_1x1", None) is not None:
            with tf.name_scope(self.expand_1x1.name):
                self.expand_1x1.build(None)
        if getattr(self, "conv_3x3", None) is not None:
            with tf.name_scope(self.conv_3x3.name):
                self.conv_3x3.build(None)
        if getattr(self, "reduce_1x1", None) is not None:
            with tf.name_scope(self.reduce_1x1.name):
                self.reduce_1x1.build(None)


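# Channel math example for the block above (assumed values for illustration): with
# in_channels=32 and config.expand_ratio=4.0, expanded_channels = make_divisible(128, 8) = 128,
# so the block runs 32 -> 128 (1x1) -> 128 (depthwise 3x3) -> out_channels (1x1), with a
# residual connection only when stride == 1 and in_channels == out_channels.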
class TFMobileViTMobileNetLayer(keras.layers.Layer):
    def __init__(
        self,
        config: MobileViTConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 1,
        num_stages: int = 1,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.layers = []
        for i in range(num_stages):
            layer = TFMobileViTInvertedResidual(
                config,
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride if i == 0 else 1,
                name=f"layer.{i}",
            )
            self.layers.append(layer)
            in_channels = out_channels

    def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor:
        for layer_module in self.layers:
            features = layer_module(features, training=training)
        return features

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "layers", None) is not None:
            for layer_module in self.layers:
                with tf.name_scope(layer_module.name):
                    layer_module.build(None)


class TFMobileViTSelfAttention(keras.layers.Layer):
    def __init__(self, config: MobileViTConfig, hidden_size: int, **kwargs) -> None:
        super().__init__(**kwargs)

        if hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size {hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        scale = tf.cast(self.attention_head_size, dtype=tf.float32)
        self.scale = tf.math.sqrt(scale)

        self.query = keras.layers.Dense(self.all_head_size, use_bias=config.qkv_bias, name="query")
        self.key = keras.layers.Dense(self.all_head_size, use_bias=config.qkv_bias, name="key")
        self.value = keras.layers.Dense(self.all_head_size, use_bias=config.qkv_bias, name="value")

        self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
        self.hidden_size = hidden_size

    def transpose_for_scores(self, x: tf.Tensor) -> tf.Tensor:
        batch_size = tf.shape(x)[0]
        x = tf.reshape(x, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
        batch_size = tf.shape(hidden_states)[0]

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(self.query(hidden_states))

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
        attention_scores = attention_scores / self.scale

        # Normalize the attention scores to probabilities.
        attention_probs = stable_softmax(attention_scores, axis=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs, training=training)

        context_layer = tf.matmul(attention_probs, value_layer)

        context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
        context_layer = tf.reshape(context_layer, shape=(batch_size, -1, self.all_head_size))
        return context_layer

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "query", None) is not None:
            with tf.name_scope(self.query.name):
                self.query.build([None, None, self.hidden_size])
        if getattr(self, "key", None) is not None:
            with tf.name_scope(self.key.name):
                self.key.build([None, None, self.hidden_size])
        if getattr(self, "value", None) is not None:
            with tf.name_scope(self.value.name):
                self.value.build([None, None, self.hidden_size])


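# Shape sketch for `transpose_for_scores` (the numbers are assumptions for illustration):
# with hidden_size=144 and 4 attention heads, (batch, seq, 144) -> reshape ->
# (batch, seq, 4, 36) -> transpose -> (batch, 4, seq, 36), so every head attends over the
# patch sequence independently before the heads are re-merged at the end of `call`.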
class TFMobileViTSelfOutput(keras.layers.Layer):
    def __init__(self, config: MobileViTConfig, hidden_size: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(hidden_size, name="dense")
        self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
        self.hidden_size = hidden_size

    def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.hidden_size])


class TFMobileViTAttention(keras.layers.Layer):
    def __init__(self, config: MobileViTConfig, hidden_size: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self.attention = TFMobileViTSelfAttention(config, hidden_size, name="attention")
        self.dense_output = TFMobileViTSelfOutput(config, hidden_size, name="output")

    def prune_heads(self, heads):
        raise NotImplementedError

    def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
        self_outputs = self.attention(hidden_states, training=training)
        attention_output = self.dense_output(self_outputs, training=training)
        return attention_output

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "attention", None) is not None:
            with tf.name_scope(self.attention.name):
                self.attention.build(None)
        if getattr(self, "dense_output", None) is not None:
            with tf.name_scope(self.dense_output.name):
                self.dense_output.build(None)


class TFMobileViTIntermediate(keras.layers.Layer):
    def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(intermediate_size, name="dense")
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.intermediate_act_fn = config.hidden_act
        self.hidden_size = hidden_size

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.hidden_size])


class TFMobileViTOutput(keras.layers.Layer):
    def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(hidden_size, name="dense")
        self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
        self.intermediate_size = intermediate_size

    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = hidden_states + input_tensor
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.intermediate_size])


class TFMobileViTTransformerLayer(keras.layers.Layer):
    def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self.attention = TFMobileViTAttention(config, hidden_size, name="attention")
        self.intermediate = TFMobileViTIntermediate(config, hidden_size, intermediate_size, name="intermediate")
        self.mobilevit_output = TFMobileViTOutput(config, hidden_size, intermediate_size, name="output")
        self.layernorm_before = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_before")
        self.layernorm_after = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_after")
        self.hidden_size = hidden_size

    def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
        attention_output = self.attention(self.layernorm_before(hidden_states), training=training)
        hidden_states = attention_output + hidden_states

        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)
        layer_output = self.mobilevit_output(layer_output, hidden_states, training=training)
        return layer_output

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "attention", None) is not None:
            with tf.name_scope(self.attention.name):
                self.attention.build(None)
        if getattr(self, "intermediate", None) is not None:
            with tf.name_scope(self.intermediate.name):
                self.intermediate.build(None)
        if getattr(self, "mobilevit_output", None) is not None:
            with tf.name_scope(self.mobilevit_output.name):
                self.mobilevit_output.build(None)
        if getattr(self, "layernorm_before", None) is not None:
            with tf.name_scope(self.layernorm_before.name):
                self.layernorm_before.build([None, None, self.hidden_size])
        if getattr(self, "layernorm_after", None) is not None:
            with tf.name_scope(self.layernorm_after.name):
                self.layernorm_after.build([None, None, self.hidden_size])


class TFMobileViTTransformer(keras.layers.Layer):
    def __init__(self, config: MobileViTConfig, hidden_size: int, num_stages: int, **kwargs) -> None:
        super().__init__(**kwargs)

        self.layers = []
        for i in range(num_stages):
            transformer_layer = TFMobileViTTransformerLayer(
                config,
                hidden_size=hidden_size,
                intermediate_size=int(hidden_size * config.mlp_ratio),
                name=f"layer.{i}",
            )
            self.layers.append(transformer_layer)

    def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
        for layer_module in self.layers:
            hidden_states = layer_module(hidden_states, training=training)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "layers", None) is not None:
            for layer_module in self.layers:
                with tf.name_scope(layer_module.name):
                    layer_module.build(None)


class TFMobileViTLayer(keras.layers.Layer):
    """
    MobileViT block: https://arxiv.org/abs/2110.02178
    """

    def __init__(
        self,
        config: MobileViTConfig,
        in_channels: int,
        out_channels: int,
        stride: int,
        hidden_size: int,
        num_stages: int,
        dilation: int = 1,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_width = config.patch_size
        self.patch_height = config.patch_size

        if stride == 2:
            self.downsampling_layer = TFMobileViTInvertedResidual(
                config,
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride if dilation == 1 else 1,
                dilation=dilation // 2 if dilation > 1 else 1,
                name="downsampling_layer",
            )
            in_channels = out_channels
        else:
            self.downsampling_layer = None

        self.conv_kxk = TFMobileViTConvLayer(
            config,
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=config.conv_kernel_size,
            name="conv_kxk",
        )

        self.conv_1x1 = TFMobileViTConvLayer(
            config,
            in_channels=in_channels,
            out_channels=hidden_size,
            kernel_size=1,
            use_normalization=False,
            use_activation=False,
            name="conv_1x1",
        )

        self.transformer = TFMobileViTTransformer(
            config, hidden_size=hidden_size, num_stages=num_stages, name="transformer"
        )

        self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")

        self.conv_projection = TFMobileViTConvLayer(
            config, in_channels=hidden_size, out_channels=in_channels, kernel_size=1, name="conv_projection"
        )

        self.fusion = TFMobileViTConvLayer(
            config,
            in_channels=2 * in_channels,
            out_channels=in_channels,
            kernel_size=config.conv_kernel_size,
            name="fusion",
        )
        self.hidden_size = hidden_size

    def unfolding(self, features: tf.Tensor) -> Tuple[tf.Tensor, Dict]:
        patch_width, patch_height = self.patch_width, self.patch_height
        patch_area = tf.cast(patch_width * patch_height, "int32")

        batch_size = tf.shape(features)[0]
        orig_height = tf.shape(features)[1]
        orig_width = tf.shape(features)[2]
        channels = tf.shape(features)[3]

        new_height = tf.cast(tf.math.ceil(orig_height / patch_height) * patch_height, "int32")
        new_width = tf.cast(tf.math.ceil(orig_width / patch_width) * patch_width, "int32")

        interpolate = new_width != orig_width or new_height != orig_height
        if interpolate:
            # Note: Padding can be done, but then it needs to be handled in attention function.
            features = tf.image.resize(features, size=(new_height, new_width), method="bilinear")

        # number of patches along width and height
        num_patch_width = new_width // patch_width
        num_patch_height = new_height // patch_height
        num_patches = num_patch_height * num_patch_width

        # convert from shape (batch_size, orig_height, orig_width, channels)
        # to the shape (batch_size * patch_area, num_patches, channels)
        features = tf.transpose(features, [0, 3, 1, 2])
        patches = tf.reshape(
            features, (batch_size * channels * num_patch_height, patch_height, num_patch_width, patch_width)
        )
        patches = tf.transpose(patches, [0, 2, 1, 3])
        patches = tf.reshape(patches, (batch_size, channels, num_patches, patch_area))
        patches = tf.transpose(patches, [0, 3, 2, 1])
        patches = tf.reshape(patches, (batch_size * patch_area, num_patches, channels))

        info_dict = {
            "orig_size": (orig_height, orig_width),
            "batch_size": batch_size,
            "channels": channels,
            "interpolate": interpolate,
            "num_patches": num_patches,
            "num_patches_width": num_patch_width,
            "num_patches_height": num_patch_height,
        }
        return patches, info_dict

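    # A note on tensor layout: Keras feeds this layer NHWC tensors, so `unfolding` above first
    # transposes to channels-first before reusing the same reshape sequence as the PyTorch
    # version, and `folding` below ends with a transpose back to NHWC.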
    def folding(self, patches: tf.Tensor, info_dict: Dict) -> tf.Tensor:
        patch_width, patch_height = self.patch_width, self.patch_height
        patch_area = int(patch_width * patch_height)

        batch_size = info_dict["batch_size"]
        channels = info_dict["channels"]
        num_patches = info_dict["num_patches"]
        num_patch_height = info_dict["num_patches_height"]
        num_patch_width = info_dict["num_patches_width"]

        # convert from shape (batch_size * patch_area, num_patches, channels)
        # back to shape (batch_size, channels, orig_height, orig_width)
        features = tf.reshape(patches, (batch_size, patch_area, num_patches, -1))
        features = tf.transpose(features, perm=(0, 3, 2, 1))
        features = tf.reshape(
            features, (batch_size * channels * num_patch_height, num_patch_width, patch_height, patch_width)
        )
        features = tf.transpose(features, perm=(0, 2, 1, 3))
        features = tf.reshape(
            features, (batch_size, channels, num_patch_height * patch_height, num_patch_width * patch_width)
        )
        features = tf.transpose(features, perm=(0, 2, 3, 1))

        if info_dict["interpolate"]:
            features = tf.image.resize(features, size=info_dict["orig_size"], method="bilinear")

        return features

    def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor:
        # reduce spatial dimensions if needed
        if self.downsampling_layer:
            features = self.downsampling_layer(features, training=training)

        residual = features

        # local representation
        features = self.conv_kxk(features, training=training)
        features = self.conv_1x1(features, training=training)

        # convert feature map to patches
        patches, info_dict = self.unfolding(features)

        # learn global representations
        patches = self.transformer(patches, training=training)
        patches = self.layernorm(patches)

        # convert patches back to feature maps
        features = self.folding(patches, info_dict)

        features = self.conv_projection(features, training=training)
        features = self.fusion(tf.concat([residual, features], axis=-1), training=training)
        return features

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "conv_kxk", None) is not None:
            with tf.name_scope(self.conv_kxk.name):
                self.conv_kxk.build(None)
        if getattr(self, "conv_1x1", None) is not None:
            with tf.name_scope(self.conv_1x1.name):
                self.conv_1x1.build(None)
        if getattr(self, "transformer", None) is not None:
            with tf.name_scope(self.transformer.name):
                self.transformer.build(None)
        if getattr(self, "layernorm", None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, self.hidden_size])
        if getattr(self, "conv_projection", None) is not None:
            with tf.name_scope(self.conv_projection.name):
                self.conv_projection.build(None)
        if getattr(self, "fusion", None) is not None:
            with tf.name_scope(self.fusion.name):
                self.fusion.build(None)
        if getattr(self, "downsampling_layer", None) is not None:
            with tf.name_scope(self.downsampling_layer.name):
                self.downsampling_layer.build(None)


class TFMobileViTEncoder(keras.layers.Layer):
|
686 |
+
def __init__(self, config: MobileViTConfig, **kwargs) -> None:
|
687 |
+
super().__init__(**kwargs)
|
688 |
+
self.config = config
|
689 |
+
|
690 |
+
self.layers = []
|
691 |
+
|
692 |
+
# segmentation architectures like DeepLab and PSPNet modify the strides
|
693 |
+
# of the classification backbones
|
694 |
+
dilate_layer_4 = dilate_layer_5 = False
|
695 |
+
if config.output_stride == 8:
|
696 |
+
dilate_layer_4 = True
|
697 |
+
dilate_layer_5 = True
|
698 |
+
elif config.output_stride == 16:
|
699 |
+
dilate_layer_5 = True
|
700 |
+
|
701 |
+
dilation = 1
|
702 |
+
|
703 |
+
layer_1 = TFMobileViTMobileNetLayer(
|
704 |
+
config,
|
705 |
+
in_channels=config.neck_hidden_sizes[0],
|
706 |
+
out_channels=config.neck_hidden_sizes[1],
|
707 |
+
stride=1,
|
708 |
+
num_stages=1,
|
709 |
+
name="layer.0",
|
710 |
+
)
|
711 |
+
self.layers.append(layer_1)
|
712 |
+
|
713 |
+
layer_2 = TFMobileViTMobileNetLayer(
|
714 |
+
config,
|
715 |
+
in_channels=config.neck_hidden_sizes[1],
|
716 |
+
out_channels=config.neck_hidden_sizes[2],
|
717 |
+
stride=2,
|
718 |
+
num_stages=3,
|
719 |
+
name="layer.1",
|
720 |
+
)
|
721 |
+
self.layers.append(layer_2)
|
722 |
+
|
723 |
+
layer_3 = TFMobileViTLayer(
|
724 |
+
config,
|
725 |
+
in_channels=config.neck_hidden_sizes[2],
|
726 |
+
out_channels=config.neck_hidden_sizes[3],
|
727 |
+
stride=2,
|
728 |
+
hidden_size=config.hidden_sizes[0],
|
729 |
+
num_stages=2,
|
730 |
+
name="layer.2",
|
731 |
+
)
|
732 |
+
self.layers.append(layer_3)
|
733 |
+
|
734 |
+
if dilate_layer_4:
|
735 |
+
dilation *= 2
|
736 |
+
|
737 |
+
layer_4 = TFMobileViTLayer(
|
738 |
+
config,
|
739 |
+
in_channels=config.neck_hidden_sizes[3],
|
740 |
+
out_channels=config.neck_hidden_sizes[4],
|
741 |
+
stride=2,
|
742 |
+
hidden_size=config.hidden_sizes[1],
|
743 |
+
num_stages=4,
|
744 |
+
dilation=dilation,
|
745 |
+
name="layer.3",
|
746 |
+
)
|
747 |
+
self.layers.append(layer_4)
|
748 |
+
|
749 |
+
if dilate_layer_5:
|
750 |
+
dilation *= 2
|
751 |
+
|
752 |
+
layer_5 = TFMobileViTLayer(
|
753 |
+
config,
|
754 |
+
in_channels=config.neck_hidden_sizes[4],
|
755 |
+
out_channels=config.neck_hidden_sizes[5],
|
756 |
+
stride=2,
|
757 |
+
hidden_size=config.hidden_sizes[2],
|
758 |
+
num_stages=3,
|
759 |
+
dilation=dilation,
|
760 |
+
name="layer.4",
|
761 |
+
)
|
762 |
+
self.layers.append(layer_5)
|
763 |
+
|
764 |
+
def call(
|
765 |
+
self,
|
766 |
+
hidden_states: tf.Tensor,
|
767 |
+
output_hidden_states: bool = False,
|
768 |
+
return_dict: bool = True,
|
769 |
+
training: bool = False,
|
770 |
+
) -> Union[tuple, TFBaseModelOutput]:
|
771 |
+
all_hidden_states = () if output_hidden_states else None
|
772 |
+
|
773 |
+
for i, layer_module in enumerate(self.layers):
|
774 |
+
hidden_states = layer_module(hidden_states, training=training)
|
775 |
+
|
776 |
+
if output_hidden_states:
|
777 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
778 |
+
|
779 |
+
if not return_dict:
|
780 |
+
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
|
781 |
+
|
782 |
+
return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
|
783 |
+
|
784 |
+
def build(self, input_shape=None):
|
785 |
+
if self.built:
|
786 |
+
return
|
787 |
+
self.built = True
|
788 |
+
if getattr(self, "layers", None) is not None:
|
789 |
+
for layer_module in self.layers:
|
790 |
+
with tf.name_scope(layer_module.name):
|
791 |
+
layer_module.build(None)
|
792 |
+
|
793 |
+
|
794 |
+
@keras_serializable
|
795 |
+
class TFMobileViTMainLayer(keras.layers.Layer):
|
796 |
+
config_class = MobileViTConfig
|
797 |
+
|
798 |
+
def __init__(self, config: MobileViTConfig, expand_output: bool = True, **kwargs):
|
799 |
+
super().__init__(**kwargs)
|
800 |
+
self.config = config
|
801 |
+
self.expand_output = expand_output
|
802 |
+
|
803 |
+
self.conv_stem = TFMobileViTConvLayer(
|
804 |
+
config,
|
805 |
+
in_channels=config.num_channels,
|
806 |
+
out_channels=config.neck_hidden_sizes[0],
|
807 |
+
kernel_size=3,
|
808 |
+
stride=2,
|
809 |
+
name="conv_stem",
|
810 |
+
)
|
811 |
+
|
812 |
+
self.encoder = TFMobileViTEncoder(config, name="encoder")
|
813 |
+
|
814 |
+
if self.expand_output:
|
815 |
+
self.conv_1x1_exp = TFMobileViTConvLayer(
|
816 |
+
config,
|
817 |
+
in_channels=config.neck_hidden_sizes[5],
|
818 |
+
out_channels=config.neck_hidden_sizes[6],
|
819 |
+
kernel_size=1,
|
820 |
+
name="conv_1x1_exp",
|
821 |
+
)
|
822 |
+
|
823 |
+
self.pooler = keras.layers.GlobalAveragePooling2D(data_format="channels_first", name="pooler")
|
824 |
+
|
825 |
+
def _prune_heads(self, heads_to_prune):
|
826 |
+
"""
|
827 |
+
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
|
828 |
+
class PreTrainedModel
|
829 |
+
"""
|
830 |
+
raise NotImplementedError
|
831 |
+
|
832 |
+
@unpack_inputs
|
833 |
+
def call(
|
834 |
+
self,
|
835 |
+
pixel_values: tf.Tensor | None = None,
|
836 |
+
output_hidden_states: Optional[bool] = None,
|
837 |
+
return_dict: Optional[bool] = None,
|
838 |
+
training: bool = False,
|
839 |
+
) -> Union[Tuple[tf.Tensor], TFBaseModelOutputWithPooling]:
|
840 |
+
output_hidden_states = (
|
841 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
842 |
+
)
|
843 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
844 |
+
|
845 |
+
# When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
|
846 |
+
# So change the input format from `NCHW` to `NHWC`.
|
847 |
+
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
|
848 |
+
pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
|
849 |
+
|
850 |
+
embedding_output = self.conv_stem(pixel_values, training=training)
|
851 |
+
|
852 |
+
encoder_outputs = self.encoder(
|
853 |
+
embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
|
854 |
+
)
|
855 |
+
|
856 |
+
if self.expand_output:
|
857 |
+
last_hidden_state = self.conv_1x1_exp(encoder_outputs[0])
|
858 |
+
|
859 |
+
# Change to NCHW output format to have uniformity in the modules
|
860 |
+
last_hidden_state = tf.transpose(last_hidden_state, perm=[0, 3, 1, 2])
|
861 |
+
|
862 |
+
# global average pooling: (batch_size, channels, height, width) -> (batch_size, channels)
|
863 |
+
pooled_output = self.pooler(last_hidden_state)
|
864 |
+
else:
|
865 |
+
last_hidden_state = encoder_outputs[0]
|
866 |
+
# Change to NCHW output format to have uniformity in the modules
|
867 |
+
last_hidden_state = tf.transpose(last_hidden_state, perm=[0, 3, 1, 2])
|
868 |
+
pooled_output = None
|
869 |
+
|
870 |
+
if not return_dict:
|
871 |
+
output = (last_hidden_state, pooled_output) if pooled_output is not None else (last_hidden_state,)
|
872 |
+
|
873 |
+
# Change to NCHW output format to have uniformity in the modules
|
874 |
+
if not self.expand_output:
|
875 |
+
remaining_encoder_outputs = encoder_outputs[1:]
|
876 |
+
remaining_encoder_outputs = tuple(
|
877 |
+
[tf.transpose(h, perm=(0, 3, 1, 2)) for h in remaining_encoder_outputs[0]]
|
878 |
+
)
|
879 |
+
remaining_encoder_outputs = (remaining_encoder_outputs,)
|
880 |
+
return output + remaining_encoder_outputs
|
881 |
+
else:
|
882 |
+
return output + encoder_outputs[1:]
|
883 |
+
|
884 |
+
# Change the other hidden state outputs to NCHW as well
|
885 |
+
if output_hidden_states:
|
886 |
+
hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
|
887 |
+
|
888 |
+
return TFBaseModelOutputWithPooling(
|
889 |
+
last_hidden_state=last_hidden_state,
|
890 |
+
pooler_output=pooled_output,
|
891 |
+
hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
|
892 |
+
)
|
893 |
+
|
894 |
+
def build(self, input_shape=None):
|
895 |
+
if self.built:
|
896 |
+
return
|
897 |
+
self.built = True
|
898 |
+
if getattr(self, "conv_stem", None) is not None:
|
899 |
+
with tf.name_scope(self.conv_stem.name):
|
900 |
+
self.conv_stem.build(None)
|
901 |
+
if getattr(self, "encoder", None) is not None:
|
902 |
+
with tf.name_scope(self.encoder.name):
|
903 |
+
self.encoder.build(None)
|
904 |
+
if getattr(self, "pooler", None) is not None:
|
905 |
+
with tf.name_scope(self.pooler.name):
|
906 |
+
self.pooler.build([None, None, None, None])
|
907 |
+
if getattr(self, "conv_1x1_exp", None) is not None:
|
908 |
+
with tf.name_scope(self.conv_1x1_exp.name):
|
909 |
+
self.conv_1x1_exp.build(None)
|
910 |
+
|
911 |
+
|
912 |
+
class TFMobileViTPreTrainedModel(TFPreTrainedModel):
|
913 |
+
"""
|
914 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
915 |
+
models.
|
916 |
+
"""
|
917 |
+
|
918 |
+
config_class = MobileViTConfig
|
919 |
+
base_model_prefix = "mobilevit"
|
920 |
+
main_input_name = "pixel_values"
|
921 |
+
|
922 |
+
|
923 |
+
MOBILEVIT_START_DOCSTRING = r"""
|
924 |
+
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
|
925 |
+
library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
|
926 |
+
etc.)
|
927 |
+
|
928 |
+
This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
|
929 |
+
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
|
930 |
+
behavior.
|
931 |
+
|
932 |
+
<Tip>
|
933 |
+
|
934 |
+
TensorFlow models and layers in `transformers` accept two formats as input:
|
935 |
+
|
936 |
+
- having all inputs as keyword arguments (like PyTorch models), or
|
937 |
+
- having all inputs as a list, tuple or dict in the first positional argument.
|
938 |
+
|
939 |
+
The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
|
940 |
+
and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
|
941 |
+
pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
|
942 |
+
format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
|
943 |
+
the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
|
944 |
+
positional argument:
|
945 |
+
|
946 |
+
- a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`
|
947 |
+
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
|
948 |
+
`model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`
|
949 |
+
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
|
950 |
+
`model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`
|
951 |
+
|
952 |
+
Note that when creating models and layers with
|
953 |
+
[subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
|
954 |
+
about any of this, as you can just pass inputs like you would to any other Python function!
|
955 |
+
|
956 |
+
</Tip>
|
957 |
+
|
958 |
+
Parameters:
|
959 |
+
config ([`MobileViTConfig`]): Model configuration class with all the parameters of the model.
|
960 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
961 |
+
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
|
962 |
+
"""
|
963 |
+
|
964 |
+
MOBILEVIT_INPUTS_DOCSTRING = r"""
|
965 |
+
Args:
|
966 |
+
pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
|
967 |
+
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
|
968 |
+
[`MobileViTImageProcessor.__call__`] for details.
|
969 |
+
|
970 |
+
output_hidden_states (`bool`, *optional*):
|
971 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
972 |
+
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
|
973 |
+
used instead.
|
974 |
+
return_dict (`bool`, *optional*):
|
975 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
|
976 |
+
eager mode, in graph mode the value will always be set to True.
|
977 |
+
"""
|
978 |
+
|
979 |
+
|
980 |
+
@add_start_docstrings(
|
981 |
+
"The bare MobileViT model outputting raw hidden-states without any specific head on top.",
|
982 |
+
MOBILEVIT_START_DOCSTRING,
|
983 |
+
)
|
984 |
+
class TFMobileViTModel(TFMobileViTPreTrainedModel):
|
985 |
+
def __init__(self, config: MobileViTConfig, expand_output: bool = True, *inputs, **kwargs):
|
986 |
+
super().__init__(config, *inputs, **kwargs)
|
987 |
+
self.config = config
|
988 |
+
self.expand_output = expand_output
|
989 |
+
|
990 |
+
self.mobilevit = TFMobileViTMainLayer(config, expand_output=expand_output, name="mobilevit")
|
991 |
+
|
992 |
+
@unpack_inputs
|
993 |
+
@add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING)
|
994 |
+
@add_code_sample_docstrings(
|
995 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
996 |
+
output_type=TFBaseModelOutputWithPooling,
|
997 |
+
config_class=_CONFIG_FOR_DOC,
|
998 |
+
modality="vision",
|
999 |
+
expected_output=_EXPECTED_OUTPUT_SHAPE,
|
1000 |
+
)
|
1001 |
+
def call(
|
1002 |
+
self,
|
1003 |
+
pixel_values: tf.Tensor | None = None,
|
1004 |
+
output_hidden_states: Optional[bool] = None,
|
1005 |
+
return_dict: Optional[bool] = None,
|
1006 |
+
training: bool = False,
|
1007 |
+
) -> Union[Tuple[tf.Tensor], TFBaseModelOutputWithPooling]:
|
1008 |
+
output = self.mobilevit(pixel_values, output_hidden_states, return_dict, training=training)
|
1009 |
+
return output
|
1010 |
+
|
1011 |
+
def build(self, input_shape=None):
|
1012 |
+
if self.built:
|
1013 |
+
return
|
1014 |
+
self.built = True
|
1015 |
+
if getattr(self, "mobilevit", None) is not None:
|
1016 |
+
with tf.name_scope(self.mobilevit.name):
|
1017 |
+
self.mobilevit.build(None)
|
1018 |
+
|
1019 |
+
|
1020 |
+
@add_start_docstrings(
|
1021 |
+
"""
|
1022 |
+
MobileViT model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
|
1023 |
+
ImageNet.
|
1024 |
+
""",
|
1025 |
+
MOBILEVIT_START_DOCSTRING,
|
1026 |
+
)
|
1027 |
+
class TFMobileViTForImageClassification(TFMobileViTPreTrainedModel, TFSequenceClassificationLoss):
|
1028 |
+
def __init__(self, config: MobileViTConfig, *inputs, **kwargs) -> None:
|
1029 |
+
super().__init__(config, *inputs, **kwargs)
|
1030 |
+
|
1031 |
+
self.num_labels = config.num_labels
|
1032 |
+
self.mobilevit = TFMobileViTMainLayer(config, name="mobilevit")
|
1033 |
+
|
1034 |
+
# Classifier head
|
1035 |
+
self.dropout = keras.layers.Dropout(config.classifier_dropout_prob)
|
1036 |
+
self.classifier = (
|
1037 |
+
keras.layers.Dense(config.num_labels, name="classifier") if config.num_labels > 0 else tf.identity
|
1038 |
+
)
|
1039 |
+
self.config = config
|
1040 |
+
|
1041 |
+
@unpack_inputs
|
1042 |
+
@add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING)
|
1043 |
+
@add_code_sample_docstrings(
|
1044 |
+
checkpoint=_IMAGE_CLASS_CHECKPOINT,
|
1045 |
+
output_type=TFImageClassifierOutputWithNoAttention,
|
1046 |
+
config_class=_CONFIG_FOR_DOC,
|
1047 |
+
expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
|
1048 |
+
)
|
1049 |
+
def call(
|
1050 |
+
self,
|
1051 |
+
pixel_values: tf.Tensor | None = None,
|
1052 |
+
output_hidden_states: Optional[bool] = None,
|
1053 |
+
labels: tf.Tensor | None = None,
|
1054 |
+
return_dict: Optional[bool] = None,
|
1055 |
+
training: Optional[bool] = False,
|
1056 |
+
) -> Union[tuple, TFImageClassifierOutputWithNoAttention]:
|
1057 |
+
r"""
|
1058 |
+
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
|
1059 |
+
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
|
1060 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
|
1061 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
1062 |
+
"""
|
1063 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1064 |
+
|
1065 |
+
outputs = self.mobilevit(
|
1066 |
+
pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
|
1067 |
+
)
|
1068 |
+
|
1069 |
+
pooled_output = outputs.pooler_output if return_dict else outputs[1]
|
1070 |
+
|
1071 |
+
logits = self.classifier(self.dropout(pooled_output, training=training))
|
1072 |
+
loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
|
1073 |
+
|
1074 |
+
if not return_dict:
|
1075 |
+
output = (logits,) + outputs[2:]
|
1076 |
+
return ((loss,) + output) if loss is not None else output
|
1077 |
+
|
1078 |
+
return TFImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
|
1079 |
+
|
1080 |
+
def build(self, input_shape=None):
|
1081 |
+
if self.built:
|
1082 |
+
return
|
1083 |
+
self.built = True
|
1084 |
+
if getattr(self, "mobilevit", None) is not None:
|
1085 |
+
with tf.name_scope(self.mobilevit.name):
|
1086 |
+
self.mobilevit.build(None)
|
1087 |
+
if getattr(self, "classifier", None) is not None:
|
1088 |
+
if hasattr(self.classifier, "name"):
|
1089 |
+
with tf.name_scope(self.classifier.name):
|
1090 |
+
self.classifier.build([None, None, self.config.neck_hidden_sizes[-1]])
|
1091 |
+
|
1092 |
+
|
1093 |
+
class TFMobileViTASPPPooling(keras.layers.Layer):
|
1094 |
+
def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int, **kwargs) -> None:
|
1095 |
+
super().__init__(**kwargs)
|
1096 |
+
|
1097 |
+
self.global_pool = keras.layers.GlobalAveragePooling2D(keepdims=True, name="global_pool")
|
1098 |
+
|
1099 |
+
self.conv_1x1 = TFMobileViTConvLayer(
|
1100 |
+
config,
|
1101 |
+
in_channels=in_channels,
|
1102 |
+
out_channels=out_channels,
|
1103 |
+
kernel_size=1,
|
1104 |
+
stride=1,
|
1105 |
+
use_normalization=True,
|
1106 |
+
use_activation="relu",
|
1107 |
+
name="conv_1x1",
|
1108 |
+
)
|
1109 |
+
|
1110 |
+
def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor:
|
1111 |
+
spatial_size = shape_list(features)[1:-1]
|
1112 |
+
features = self.global_pool(features)
|
1113 |
+
features = self.conv_1x1(features, training=training)
|
1114 |
+
features = tf.image.resize(features, size=spatial_size, method="bilinear")
|
1115 |
+
return features
|
1116 |
+
|
1117 |
+
def build(self, input_shape=None):
|
1118 |
+
if self.built:
|
1119 |
+
return
|
1120 |
+
self.built = True
|
1121 |
+
if getattr(self, "global_pool", None) is not None:
|
1122 |
+
with tf.name_scope(self.global_pool.name):
|
1123 |
+
self.global_pool.build([None, None, None, None])
|
1124 |
+
if getattr(self, "conv_1x1", None) is not None:
|
1125 |
+
with tf.name_scope(self.conv_1x1.name):
|
1126 |
+
self.conv_1x1.build(None)
|
1127 |
+
|
1128 |
+
|
1129 |
+
class TFMobileViTASPP(keras.layers.Layer):
|
1130 |
+
"""
|
1131 |
+
ASPP module defined in DeepLab papers: https://arxiv.org/abs/1606.00915, https://arxiv.org/abs/1706.05587
|
1132 |
+
"""
|
1133 |
+
|
1134 |
+
def __init__(self, config: MobileViTConfig, **kwargs) -> None:
|
1135 |
+
super().__init__(**kwargs)
|
1136 |
+
|
1137 |
+
in_channels = config.neck_hidden_sizes[-2]
|
1138 |
+
out_channels = config.aspp_out_channels
|
1139 |
+
|
1140 |
+
if len(config.atrous_rates) != 3:
|
1141 |
+
raise ValueError("Expected 3 values for atrous_rates")
|
1142 |
+
|
1143 |
+
self.convs = []
|
1144 |
+
|
1145 |
+
in_projection = TFMobileViTConvLayer(
|
1146 |
+
config,
|
1147 |
+
in_channels=in_channels,
|
1148 |
+
out_channels=out_channels,
|
1149 |
+
kernel_size=1,
|
1150 |
+
use_activation="relu",
|
1151 |
+
name="convs.0",
|
1152 |
+
)
|
1153 |
+
self.convs.append(in_projection)
|
1154 |
+
|
1155 |
+
self.convs.extend(
|
1156 |
+
[
|
1157 |
+
TFMobileViTConvLayer(
|
1158 |
+
config,
|
1159 |
+
in_channels=in_channels,
|
1160 |
+
out_channels=out_channels,
|
1161 |
+
kernel_size=3,
|
1162 |
+
dilation=rate,
|
1163 |
+
use_activation="relu",
|
1164 |
+
name=f"convs.{i + 1}",
|
1165 |
+
)
|
1166 |
+
for i, rate in enumerate(config.atrous_rates)
|
1167 |
+
]
|
1168 |
+
)
|
1169 |
+
|
1170 |
+
pool_layer = TFMobileViTASPPPooling(
|
1171 |
+
config, in_channels, out_channels, name=f"convs.{len(config.atrous_rates) + 1}"
|
1172 |
+
)
|
1173 |
+
self.convs.append(pool_layer)
|
1174 |
+
|
1175 |
+
self.project = TFMobileViTConvLayer(
|
1176 |
+
config,
|
1177 |
+
in_channels=5 * out_channels,
|
1178 |
+
out_channels=out_channels,
|
1179 |
+
kernel_size=1,
|
1180 |
+
use_activation="relu",
|
1181 |
+
name="project",
|
1182 |
+
)
|
1183 |
+
|
1184 |
+
self.dropout = keras.layers.Dropout(config.aspp_dropout_prob)
|
1185 |
+
|
1186 |
+
def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor:
|
1187 |
+
# since the hidden states were transposed to have `(batch_size, channels, height, width)`
|
1188 |
+
# layout we transpose them back to have `(batch_size, height, width, channels)` layout.
|
1189 |
+
features = tf.transpose(features, perm=[0, 2, 3, 1])
|
1190 |
+
pyramid = []
|
1191 |
+
for conv in self.convs:
|
1192 |
+
pyramid.append(conv(features, training=training))
|
1193 |
+
pyramid = tf.concat(pyramid, axis=-1)
|
1194 |
+
|
1195 |
+
pooled_features = self.project(pyramid, training=training)
|
1196 |
+
pooled_features = self.dropout(pooled_features, training=training)
|
1197 |
+
return pooled_features
|
1198 |
+
|
1199 |
+
def build(self, input_shape=None):
|
1200 |
+
if self.built:
|
1201 |
+
return
|
1202 |
+
self.built = True
|
1203 |
+
if getattr(self, "project", None) is not None:
|
1204 |
+
with tf.name_scope(self.project.name):
|
1205 |
+
self.project.build(None)
|
1206 |
+
if getattr(self, "convs", None) is not None:
|
1207 |
+
for conv in self.convs:
|
1208 |
+
with tf.name_scope(conv.name):
|
1209 |
+
conv.build(None)
|
1210 |
+
|
1211 |
+
|
1212 |
+
class TFMobileViTDeepLabV3(keras.layers.Layer):
|
1213 |
+
"""
|
1214 |
+
DeepLabv3 architecture: https://arxiv.org/abs/1706.05587
|
1215 |
+
"""
|
1216 |
+
|
1217 |
+
def __init__(self, config: MobileViTConfig, **kwargs) -> None:
|
1218 |
+
super().__init__(**kwargs)
|
1219 |
+
self.aspp = TFMobileViTASPP(config, name="aspp")
|
1220 |
+
|
1221 |
+
self.dropout = keras.layers.Dropout(config.classifier_dropout_prob)
|
1222 |
+
|
1223 |
+
self.classifier = TFMobileViTConvLayer(
|
1224 |
+
config,
|
1225 |
+
in_channels=config.aspp_out_channels,
|
1226 |
+
out_channels=config.num_labels,
|
1227 |
+
kernel_size=1,
|
1228 |
+
use_normalization=False,
|
1229 |
+
use_activation=False,
|
1230 |
+
bias=True,
|
1231 |
+
name="classifier",
|
1232 |
+
)
|
1233 |
+
|
1234 |
+
def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
|
1235 |
+
features = self.aspp(hidden_states[-1], training=training)
|
1236 |
+
features = self.dropout(features, training=training)
|
1237 |
+
features = self.classifier(features, training=training)
|
1238 |
+
return features
|
1239 |
+
|
1240 |
+
def build(self, input_shape=None):
|
1241 |
+
if self.built:
|
1242 |
+
return
|
1243 |
+
self.built = True
|
1244 |
+
if getattr(self, "aspp", None) is not None:
|
1245 |
+
with tf.name_scope(self.aspp.name):
|
1246 |
+
self.aspp.build(None)
|
1247 |
+
if getattr(self, "classifier", None) is not None:
|
1248 |
+
with tf.name_scope(self.classifier.name):
|
1249 |
+
self.classifier.build(None)
|
1250 |
+
|
1251 |
+
|
1252 |
+
@add_start_docstrings(
|
1253 |
+
"""
|
1254 |
+
MobileViT model with a semantic segmentation head on top, e.g. for Pascal VOC.
|
1255 |
+
""",
|
1256 |
+
MOBILEVIT_START_DOCSTRING,
|
1257 |
+
)
|
1258 |
+
class TFMobileViTForSemanticSegmentation(TFMobileViTPreTrainedModel):
|
1259 |
+
def __init__(self, config: MobileViTConfig, **kwargs) -> None:
|
1260 |
+
super().__init__(config, **kwargs)
|
1261 |
+
|
1262 |
+
self.num_labels = config.num_labels
|
1263 |
+
self.mobilevit = TFMobileViTMainLayer(config, expand_output=False, name="mobilevit")
|
1264 |
+
self.segmentation_head = TFMobileViTDeepLabV3(config, name="segmentation_head")
|
1265 |
+
|
1266 |
+
def hf_compute_loss(self, logits, labels):
|
1267 |
+
# upsample logits to the images' original size
|
1268 |
+
# `labels` is of shape (batch_size, height, width)
|
1269 |
+
label_interp_shape = shape_list(labels)[1:]
|
1270 |
+
|
1271 |
+
upsampled_logits = tf.image.resize(logits, size=label_interp_shape, method="bilinear")
|
1272 |
+
# compute weighted loss
|
1273 |
+
loss_fct = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction="none")
|
1274 |
+
|
1275 |
+
def masked_loss(real, pred):
|
1276 |
+
unmasked_loss = loss_fct(real, pred)
|
1277 |
+
mask = tf.cast(real != self.config.semantic_loss_ignore_index, dtype=unmasked_loss.dtype)
|
1278 |
+
masked_loss = unmasked_loss * mask
|
1279 |
+
# Reduction strategy in a similar spirit to
|
1280 |
+
# https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_tf_utils.py#L210
|
1281 |
+
reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(mask)
|
1282 |
+
return tf.reshape(reduced_masked_loss, (1,))
|
1283 |
+
|
1284 |
+
return masked_loss(labels, upsampled_logits)
|
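As a quick sanity check of the masked reduction above, the snippet below (illustrative only; `255` is assumed here to play the role of the configured `semantic_loss_ignore_index`) averages per-pixel losses over the non-ignored positions.

```python
import tensorflow as tf

unmasked_loss = tf.constant([1.0, 2.0, 3.0, 4.0])
labels = tf.constant([0, 255, 1, 255])  # 255 = assumed ignore index
mask = tf.cast(labels != 255, unmasked_loss.dtype)
reduced = tf.reduce_sum(unmasked_loss * mask) / tf.reduce_sum(mask)
print(reduced.numpy())  # 2.0, the mean over the two labelled positions
```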
1285 |
+
|
1286 |
+
@unpack_inputs
|
1287 |
+
@add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING)
|
1288 |
+
@replace_return_docstrings(output_type=TFSemanticSegmenterOutputWithNoAttention, config_class=_CONFIG_FOR_DOC)
|
1289 |
+
def call(
|
1290 |
+
self,
|
1291 |
+
pixel_values: tf.Tensor | None = None,
|
1292 |
+
labels: tf.Tensor | None = None,
|
1293 |
+
output_hidden_states: Optional[bool] = None,
|
1294 |
+
return_dict: Optional[bool] = None,
|
1295 |
+
training: bool = False,
|
1296 |
+
) -> Union[tuple, TFSemanticSegmenterOutputWithNoAttention]:
|
1297 |
+
r"""
|
1298 |
+
labels (`tf.Tensor` of shape `(batch_size, height, width)`, *optional*):
|
1299 |
+
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
|
1300 |
+
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
|
1301 |
+
|
1302 |
+
Returns:
|
1303 |
+
|
1304 |
+
Examples:
|
1305 |
+
|
1306 |
+
```python
|
1307 |
+
>>> from transformers import AutoImageProcessor, TFMobileViTForSemanticSegmentation
|
1308 |
+
>>> from PIL import Image
|
1309 |
+
>>> import requests
|
1310 |
+
|
1311 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
1312 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
1313 |
+
|
1314 |
+
>>> image_processor = AutoImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small")
|
1315 |
+
>>> model = TFMobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small")
|
1316 |
+
|
1317 |
+
>>> inputs = image_processor(images=image, return_tensors="tf")
|
1318 |
+
|
1319 |
+
>>> outputs = model(**inputs)
|
1320 |
+
|
1321 |
+
>>> # logits are of shape (batch_size, num_labels, height, width)
|
1322 |
+
>>> logits = outputs.logits
|
1323 |
+
```"""
|
1324 |
+
output_hidden_states = (
|
1325 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1326 |
+
)
|
1327 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1328 |
+
|
1329 |
+
outputs = self.mobilevit(
|
1330 |
+
pixel_values,
|
1331 |
+
output_hidden_states=True, # we need the intermediate hidden states
|
1332 |
+
return_dict=return_dict,
|
1333 |
+
training=training,
|
1334 |
+
)
|
1335 |
+
|
1336 |
+
encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
|
1337 |
+
|
1338 |
+
logits = self.segmentation_head(encoder_hidden_states, training=training)
|
1339 |
+
|
1340 |
+
loss = None
|
1341 |
+
if labels is not None:
|
1342 |
+
if not self.config.num_labels > 1:
|
1343 |
+
raise ValueError("The number of labels should be greater than one")
|
1344 |
+
else:
|
1345 |
+
loss = self.hf_compute_loss(logits=logits, labels=labels)
|
1346 |
+
|
1347 |
+
# make logits of shape (batch_size, num_labels, height, width) to
|
1348 |
+
# keep them consistent across APIs
|
1349 |
+
logits = tf.transpose(logits, perm=[0, 3, 1, 2])
|
1350 |
+
|
1351 |
+
if not return_dict:
|
1352 |
+
if output_hidden_states:
|
1353 |
+
output = (logits,) + outputs[1:]
|
1354 |
+
else:
|
1355 |
+
output = (logits,) + outputs[2:]
|
1356 |
+
return ((loss,) + output) if loss is not None else output
|
1357 |
+
|
1358 |
+
return TFSemanticSegmenterOutputWithNoAttention(
|
1359 |
+
loss=loss,
|
1360 |
+
logits=logits,
|
1361 |
+
hidden_states=outputs.hidden_states if output_hidden_states else None,
|
1362 |
+
)
|
1363 |
+
|
1364 |
+
def build(self, input_shape=None):
|
1365 |
+
if self.built:
|
1366 |
+
return
|
1367 |
+
self.built = True
|
1368 |
+
if getattr(self, "mobilevit", None) is not None:
|
1369 |
+
with tf.name_scope(self.mobilevit.name):
|
1370 |
+
self.mobilevit.build(None)
|
1371 |
+
if getattr(self, "segmentation_head", None) is not None:
|
1372 |
+
with tf.name_scope(self.segmentation_head.name):
|
1373 |
+
self.segmentation_head.build(None)
|
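For completeness, a minimal usage sketch of the classification head defined in this file; it assumes the `apple/mobilevit-small` checkpoint is available on the Hub, and reuses the example image URL from the docstrings above.

```python
import requests
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFMobileViTForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# assumed checkpoint name
image_processor = AutoImageProcessor.from_pretrained("apple/mobilevit-small")
model = TFMobileViTForImageClassification.from_pretrained("apple/mobilevit-small")

inputs = image_processor(images=image, return_tensors="tf")
outputs = model(**inputs)
predicted_class = int(tf.argmax(outputs.logits, axis=-1)[0])
print(model.config.id2label[predicted_class])
```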
llmeval-env/lib/python3.10/site-packages/transformers/models/seggpt/__init__.py
ADDED
@@ -0,0 +1,71 @@
|
1 |
+
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
from typing import TYPE_CHECKING
|
15 |
+
|
16 |
+
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
|
17 |
+
|
18 |
+
|
19 |
+
_import_structure = {
|
20 |
+
"configuration_seggpt": ["SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "SegGptConfig", "SegGptOnnxConfig"]
|
21 |
+
}
|
22 |
+
|
23 |
+
try:
|
24 |
+
if not is_torch_available():
|
25 |
+
raise OptionalDependencyNotAvailable()
|
26 |
+
except OptionalDependencyNotAvailable:
|
27 |
+
pass
|
28 |
+
else:
|
29 |
+
_import_structure["modeling_seggpt"] = [
|
30 |
+
"SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
|
31 |
+
"SegGptModel",
|
32 |
+
"SegGptPreTrainedModel",
|
33 |
+
"SegGptForImageSegmentation",
|
34 |
+
]
|
35 |
+
|
36 |
+
try:
|
37 |
+
if not is_vision_available():
|
38 |
+
raise OptionalDependencyNotAvailable()
|
39 |
+
except OptionalDependencyNotAvailable:
|
40 |
+
pass
|
41 |
+
else:
|
42 |
+
_import_structure["image_processing_seggpt"] = ["SegGptImageProcessor"]
|
43 |
+
|
44 |
+
if TYPE_CHECKING:
|
45 |
+
from .configuration_seggpt import SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, SegGptConfig, SegGptOnnxConfig
|
46 |
+
|
47 |
+
try:
|
48 |
+
if not is_torch_available():
|
49 |
+
raise OptionalDependencyNotAvailable()
|
50 |
+
except OptionalDependencyNotAvailable:
|
51 |
+
pass
|
52 |
+
else:
|
53 |
+
from .modeling_seggpt import (
|
54 |
+
SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
|
55 |
+
SegGptForImageSegmentation,
|
56 |
+
SegGptModel,
|
57 |
+
SegGptPreTrainedModel,
|
58 |
+
)
|
59 |
+
|
60 |
+
try:
|
61 |
+
if not is_vision_available():
|
62 |
+
raise OptionalDependencyNotAvailable()
|
63 |
+
except OptionalDependencyNotAvailable:
|
64 |
+
pass
|
65 |
+
else:
|
66 |
+
from .image_processing_seggpt import SegGptImageProcessor
|
67 |
+
|
68 |
+
else:
|
69 |
+
import sys
|
70 |
+
|
71 |
+
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
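The `_LazyModule` registration above defers the heavy imports; a rough sketch of the user-visible effect (illustrative, and dependent on which optional dependencies are installed):

```python
# Importing the package does not yet load the torch-dependent modeling code.
from transformers.models import seggpt

# The real submodule is only imported when the attribute is first accessed;
# if torch were missing, this is where OptionalDependencyNotAvailable-style
# handling would surface a clear error instead.
model_cls = seggpt.SegGptForImageSegmentation
print(model_cls.__name__)  # "SegGptForImageSegmentation"
```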
llmeval-env/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.18 kB).
|
|
llmeval-env/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/configuration_seggpt.cpython-310.pyc
ADDED
Binary file (5.61 kB).
|
|
llmeval-env/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/convert_seggpt_to_hf.cpython-310.pyc
ADDED
Binary file (7.12 kB).
|
|
llmeval-env/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/image_processing_seggpt.cpython-310.pyc
ADDED
Binary file (25 kB).
|
|
llmeval-env/lib/python3.10/site-packages/transformers/models/seggpt/__pycache__/modeling_seggpt.cpython-310.pyc
ADDED
Binary file (34.8 kB).
|
|
llmeval-env/lib/python3.10/site-packages/transformers/models/seggpt/configuration_seggpt.py
ADDED
@@ -0,0 +1,144 @@
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
""" SegGpt model configuration"""
|
16 |
+
|
17 |
+
|
18 |
+
from ...configuration_utils import PretrainedConfig
|
19 |
+
from ...utils import logging
|
20 |
+
|
21 |
+
|
22 |
+
logger = logging.get_logger(__name__)
|
23 |
+
|
24 |
+
|
25 |
+
from ..deprecated._archive_maps import SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
|
26 |
+
|
27 |
+
|
28 |
+
class SegGptConfig(PretrainedConfig):
|
29 |
+
r"""
|
30 |
+
This is the configuration class to store the configuration of a [`SegGptModel`]. It is used to instantiate a SegGPT
|
31 |
+
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
|
32 |
+
defaults will yield a similar configuration to that of the SegGPT
|
33 |
+
[BAAI/seggpt-vit-large](https://huggingface.co/BAAI/seggpt-vit-large) architecture.
|
34 |
+
|
35 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
36 |
+
documentation from [`PretrainedConfig`] for more information.
|
37 |
+
|
38 |
+
Args:
|
39 |
+
hidden_size (`int`, *optional*, defaults to 1024):
|
40 |
+
Dimensionality of the encoder layers and the pooler layer.
|
41 |
+
num_hidden_layers (`int`, *optional*, defaults to 24):
|
42 |
+
Number of hidden layers in the Transformer encoder.
|
43 |
+
num_attention_heads (`int`, *optional*, defaults to 16):
|
44 |
+
Number of attention heads for each attention layer in the Transformer encoder.
|
45 |
+
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
|
46 |
+
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
|
47 |
+
`"relu"`, `"selu"` and `"gelu_new"` are supported.
|
48 |
+
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
|
49 |
+
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
|
50 |
+
initializer_range (`float`, *optional*, defaults to 0.02):
|
51 |
+
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
52 |
+
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
|
53 |
+
The epsilon used by the layer normalization layers.
|
54 |
+
image_size (`List[int]`, *optional*, defaults to `[896, 448]`):
|
55 |
+
The size (resolution) of each image.
|
56 |
+
patch_size (`int`, *optional*, defaults to 16):
|
57 |
+
The size (resolution) of each patch.
|
58 |
+
num_channels (`int`, *optional*, defaults to 3):
|
59 |
+
The number of input channels.
|
60 |
+
qkv_bias (`bool`, *optional*, defaults to `True`):
|
61 |
+
Whether to add a bias to the queries, keys and values.
|
62 |
+
mlp_dim (`int`, *optional*):
|
63 |
+
The dimensionality of the MLP layer in the Transformer encoder. If unset, defaults to
|
64 |
+
`hidden_size` * 4.
|
65 |
+
drop_path_rate (`float`, *optional*, defaults to 0.1):
|
66 |
+
The drop path rate for the dropout layers.
|
67 |
+
pretrain_image_size (`int`, *optional*, defaults to 224):
|
68 |
+
The pretrained size of the absolute position embeddings.
|
69 |
+
decoder_hidden_size (`int`, *optional*, defaults to 64):
|
70 |
+
Hidden size for decoder.
|
71 |
+
use_relative_position_embeddings (`bool`, *optional*, defaults to `True`):
|
72 |
+
Whether to use relative position embeddings in the attention layers.
|
73 |
+
merge_index (`int`, *optional*, defaults to 2):
|
74 |
+
The index of the encoder layer to merge the embeddings.
|
75 |
+
intermediate_hidden_state_indices (`List[int]`, *optional*, defaults to `[5, 11, 17, 23]`):
|
76 |
+
The indices of the encoder layers which we store as features for the decoder.
|
77 |
+
beta (`float`, *optional*, defaults to 0.01):
|
78 |
+
Regularization factor for SegGptLoss (smooth-l1 loss).
|
79 |
+
|
80 |
+
Example:
|
81 |
+
|
82 |
+
```python
|
83 |
+
>>> from transformers import SegGptConfig, SegGptModel
|
84 |
+
|
85 |
+
>>> # Initializing a SegGPT seggpt-vit-large style configuration
|
86 |
+
>>> configuration = SegGptConfig()
|
87 |
+
|
88 |
+
>>> # Initializing a model (with random weights) from the seggpt-vit-large style configuration
|
89 |
+
>>> model = SegGptModel(configuration)
|
90 |
+
|
91 |
+
>>> # Accessing the model configuration
|
92 |
+
>>> configuration = model.config
|
93 |
+
```"""
|
94 |
+
|
95 |
+
model_type = "seggpt"
|
96 |
+
|
97 |
+
def __init__(
|
98 |
+
self,
|
99 |
+
hidden_size=1024,
|
100 |
+
num_hidden_layers=24,
|
101 |
+
num_attention_heads=16,
|
102 |
+
hidden_act="gelu",
|
103 |
+
hidden_dropout_prob=0.0,
|
104 |
+
initializer_range=0.02,
|
105 |
+
layer_norm_eps=1e-6,
|
106 |
+
image_size=[896, 448],
|
107 |
+
patch_size=16,
|
108 |
+
num_channels=3,
|
109 |
+
qkv_bias=True,
|
110 |
+
mlp_dim=None,
|
111 |
+
drop_path_rate=0.1,
|
112 |
+
pretrain_image_size=224,
|
113 |
+
decoder_hidden_size=64,
|
114 |
+
use_relative_position_embeddings=True,
|
115 |
+
merge_index=2,
|
116 |
+
intermediate_hidden_state_indices=[5, 11, 17, 23],
|
117 |
+
beta=0.01,
|
118 |
+
**kwargs,
|
119 |
+
):
|
120 |
+
super().__init__(**kwargs)
|
121 |
+
|
122 |
+
if merge_index > min(intermediate_hidden_state_indices):
|
123 |
+
raise ValueError(
|
124 |
+
f"Merge index must be less than the minimum encoder output index, but got {merge_index=} and {intermediate_hidden_state_indices=}"
|
125 |
+
)
|
126 |
+
self.hidden_size = hidden_size
|
127 |
+
self.num_hidden_layers = num_hidden_layers
|
128 |
+
self.num_attention_heads = num_attention_heads
|
129 |
+
self.hidden_act = hidden_act
|
130 |
+
self.hidden_dropout_prob = hidden_dropout_prob
|
131 |
+
self.initializer_range = initializer_range
|
132 |
+
self.layer_norm_eps = layer_norm_eps
|
133 |
+
self.image_size = image_size
|
134 |
+
self.patch_size = patch_size
|
135 |
+
self.num_channels = num_channels
|
136 |
+
self.qkv_bias = qkv_bias
|
137 |
+
self.drop_path_rate = drop_path_rate
|
138 |
+
self.pretrain_image_size = pretrain_image_size
|
139 |
+
self.decoder_hidden_size = decoder_hidden_size
|
140 |
+
self.use_relative_position_embeddings = use_relative_position_embeddings
|
141 |
+
self.merge_index = merge_index
|
142 |
+
self.intermediate_hidden_state_indices = intermediate_hidden_state_indices
|
143 |
+
self.beta = beta
|
144 |
+
self.mlp_dim = int(hidden_size * 4) if mlp_dim is None else mlp_dim
|
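A short sketch of the defaults and the `merge_index` check implemented above (values are illustrative):

```python
from transformers import SegGptConfig

config = SegGptConfig()
print(config.mlp_dim)  # 4096, i.e. hidden_size * 4 because mlp_dim was left unset

# merge_index must not exceed the smallest intermediate feature index
try:
    SegGptConfig(merge_index=10, intermediate_hidden_state_indices=[5, 11, 17, 23])
except ValueError as err:
    print(err)
```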
llmeval-env/lib/python3.10/site-packages/transformers/models/seggpt/convert_seggpt_to_hf.py
ADDED
@@ -0,0 +1,222 @@
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2024 The HuggingFace Inc. team.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
"""Convert SegGPT checkpoints from the original repository.
|
16 |
+
|
17 |
+
URL: https://github.com/baaivision/Painter/tree/main/SegGPT
|
18 |
+
"""
|
19 |
+
|
20 |
+
|
21 |
+
import argparse
|
22 |
+
|
23 |
+
import requests
|
24 |
+
import torch
|
25 |
+
from PIL import Image
|
26 |
+
|
27 |
+
from transformers import SegGptConfig, SegGptForImageSegmentation, SegGptImageProcessor
|
28 |
+
from transformers.utils import logging
|
29 |
+
|
30 |
+
|
31 |
+
logging.set_verbosity_info()
|
32 |
+
logger = logging.get_logger(__name__)
|
33 |
+
|
34 |
+
|
35 |
+
# here we list all keys to be renamed (original name on the left, our name on the right)
|
36 |
+
def create_rename_keys(config):
|
37 |
+
rename_keys = []
|
38 |
+
|
39 |
+
# fmt: off
|
40 |
+
|
41 |
+
# rename embedding and its parameters
|
42 |
+
rename_keys.append(("patch_embed.proj.weight", "model.embeddings.patch_embeddings.projection.weight"))
|
43 |
+
rename_keys.append(("patch_embed.proj.bias", "model.embeddings.patch_embeddings.projection.bias"))
|
44 |
+
rename_keys.append(("mask_token", "model.embeddings.mask_token"))
|
45 |
+
rename_keys.append(("segment_token_x", "model.embeddings.segment_token_input"))
|
46 |
+
rename_keys.append(("segment_token_y", "model.embeddings.segment_token_prompt"))
|
47 |
+
rename_keys.append(("type_token_cls", "model.embeddings.type_token_semantic"))
|
48 |
+
rename_keys.append(("type_token_ins", "model.embeddings.type_token_instance"))
|
49 |
+
rename_keys.append(("pos_embed", "model.embeddings.position_embeddings"))
|
50 |
+
|
51 |
+
# rename decoder and other
|
52 |
+
rename_keys.append(("norm.weight", "model.encoder.layernorm.weight"))
|
53 |
+
rename_keys.append(("norm.bias", "model.encoder.layernorm.bias"))
|
54 |
+
rename_keys.append(("decoder_embed.weight", "decoder.decoder_embed.weight"))
|
55 |
+
rename_keys.append(("decoder_embed.bias", "decoder.decoder_embed.bias"))
|
56 |
+
rename_keys.append(("decoder_pred.0.weight", "decoder.decoder_pred.conv.weight"))
|
57 |
+
rename_keys.append(("decoder_pred.0.bias", "decoder.decoder_pred.conv.bias"))
|
58 |
+
rename_keys.append(("decoder_pred.1.weight", "decoder.decoder_pred.layernorm.weight"))
|
59 |
+
rename_keys.append(("decoder_pred.1.bias", "decoder.decoder_pred.layernorm.bias"))
|
60 |
+
rename_keys.append(("decoder_pred.3.weight", "decoder.decoder_pred.head.weight"))
|
61 |
+
rename_keys.append(("decoder_pred.3.bias", "decoder.decoder_pred.head.bias"))
|
62 |
+
|
63 |
+
# rename blocks
|
64 |
+
for i in range(config.num_hidden_layers):
|
65 |
+
rename_keys.append((f"blocks.{i}.attn.qkv.weight", f"model.encoder.layers.{i}.attention.qkv.weight"))
|
66 |
+
rename_keys.append((f"blocks.{i}.attn.qkv.bias", f"model.encoder.layers.{i}.attention.qkv.bias"))
|
67 |
+
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"model.encoder.layers.{i}.attention.proj.weight"))
|
68 |
+
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"model.encoder.layers.{i}.attention.proj.bias"))
|
69 |
+
rename_keys.append((f"blocks.{i}.attn.rel_pos_h", f"model.encoder.layers.{i}.attention.rel_pos_h"))
|
70 |
+
rename_keys.append((f"blocks.{i}.attn.rel_pos_w", f"model.encoder.layers.{i}.attention.rel_pos_w"))
|
71 |
+
|
72 |
+
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"model.encoder.layers.{i}.mlp.lin1.weight"))
|
73 |
+
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"model.encoder.layers.{i}.mlp.lin1.bias"))
|
74 |
+
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"model.encoder.layers.{i}.mlp.lin2.weight"))
|
75 |
+
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"model.encoder.layers.{i}.mlp.lin2.bias"))
|
76 |
+
|
77 |
+
rename_keys.append((f"blocks.{i}.norm1.weight", f"model.encoder.layers.{i}.layernorm_before.weight"))
|
78 |
+
rename_keys.append((f"blocks.{i}.norm1.bias", f"model.encoder.layers.{i}.layernorm_before.bias"))
|
79 |
+
rename_keys.append((f"blocks.{i}.norm2.weight", f"model.encoder.layers.{i}.layernorm_after.weight"))
|
80 |
+
rename_keys.append((f"blocks.{i}.norm2.bias", f"model.encoder.layers.{i}.layernorm_after.bias"))
|
81 |
+
|
82 |
+
# fmt: on
|
83 |
+
|
84 |
+
return rename_keys
|
85 |
+
|
86 |
+
|
87 |
+
def rename_key(dct, old, new):
|
88 |
+
val = dct.pop(old)
|
89 |
+
dct[new] = val
|
90 |
+
|
91 |
+
|
92 |
+
# We will verify our results on the example images from the original SegGPT repository
|
93 |
+
def prepare_input():
|
94 |
+
image_input_url = (
|
95 |
+
"https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_2.jpg"
|
96 |
+
)
|
97 |
+
image_prompt_url = (
|
98 |
+
"https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1.jpg"
|
99 |
+
)
|
100 |
+
mask_prompt_url = (
|
101 |
+
"https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1_target.png"
|
102 |
+
)
|
103 |
+
|
104 |
+
image_input = Image.open(requests.get(image_input_url, stream=True).raw)
|
105 |
+
image_prompt = Image.open(requests.get(image_prompt_url, stream=True).raw)
|
106 |
+
mask_prompt = Image.open(requests.get(mask_prompt_url, stream=True).raw)
|
107 |
+
|
108 |
+
return image_input, image_prompt, mask_prompt
|
109 |
+
|
110 |
+
|
111 |
+
@torch.no_grad()
|
112 |
+
def convert_seggpt_checkpoint(args):
|
113 |
+
model_name = args.model_name
|
114 |
+
pytorch_dump_folder_path = args.pytorch_dump_folder_path
|
115 |
+
verify_logits = args.verify_logits
|
116 |
+
push_to_hub = args.push_to_hub
|
117 |
+
|
118 |
+
# Define default SegGpt configuration
|
119 |
+
config = SegGptConfig()
|
120 |
+
|
121 |
+
# Load original checkpoint
|
122 |
+
checkpoint_url = "https://huggingface.co/BAAI/SegGpt/blob/main/seggpt_vit_large.pth"
|
123 |
+
original_state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
|
124 |
+
|
125 |
+
# Rename keys
|
126 |
+
new_state_dict = original_state_dict.copy()
|
127 |
+
rename_keys = create_rename_keys(config)
|
128 |
+
|
129 |
+
for src, dest in rename_keys:
|
130 |
+
rename_key(new_state_dict, src, dest)
|
131 |
+
|
132 |
+
# Load HF model
|
133 |
+
model = SegGptForImageSegmentation(config)
|
134 |
+
model.eval()
|
135 |
+
missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
|
136 |
+
print("Missing keys:", missing_keys)
|
137 |
+
print("Unexpected keys:", unexpected_keys)
|
138 |
+
|
139 |
+
input_img, prompt_img, prompt_mask = prepare_input()
|
140 |
+
image_processor = SegGptImageProcessor()
|
141 |
+
inputs = image_processor(images=input_img, prompt_images=prompt_img, prompt_masks=prompt_mask, return_tensors="pt")
|
142 |
+
|
143 |
+
expected_prompt_pixel_values = torch.tensor(
|
144 |
+
[
|
145 |
+
[[-0.6965, -0.6965, -0.6965], [-0.6965, -0.6965, -0.6965], [-0.6965, -0.6965, -0.6965]],
|
146 |
+
[[1.6583, 1.6583, 1.6583], [1.6583, 1.6583, 1.6583], [1.6583, 1.6583, 1.6583]],
|
147 |
+
[[2.3088, 2.3088, 2.3088], [2.3088, 2.3088, 2.3088], [2.3088, 2.3088, 2.3088]],
|
148 |
+
]
|
149 |
+
)
|
150 |
+
|
151 |
+
expected_pixel_values = torch.tensor(
|
152 |
+
[
|
153 |
+
[[1.6324, 1.6153, 1.5810], [1.6153, 1.5982, 1.5810], [1.5810, 1.5639, 1.5639]],
|
154 |
+
[[1.2731, 1.2556, 1.2206], [1.2556, 1.2381, 1.2031], [1.2206, 1.2031, 1.1681]],
|
155 |
+
[[1.6465, 1.6465, 1.6465], [1.6465, 1.6465, 1.6465], [1.6291, 1.6291, 1.6291]],
|
156 |
+
]
|
157 |
+
)
|
158 |
+
|
159 |
+
expected_prompt_masks = torch.tensor(
|
160 |
+
[
|
161 |
+
[[-2.1179, -2.1179, -2.1179], [-2.1179, -2.1179, -2.1179], [-2.1179, -2.1179, -2.1179]],
|
162 |
+
[[-2.0357, -2.0357, -2.0357], [-2.0357, -2.0357, -2.0357], [-2.0357, -2.0357, -2.0357]],
|
163 |
+
[[-1.8044, -1.8044, -1.8044], [-1.8044, -1.8044, -1.8044], [-1.8044, -1.8044, -1.8044]],
|
164 |
+
]
|
165 |
+
)
|
166 |
+
|
167 |
+
assert torch.allclose(inputs.pixel_values[0, :, :3, :3], expected_pixel_values, atol=1e-4)
|
168 |
+
assert torch.allclose(inputs.prompt_pixel_values[0, :, :3, :3], expected_prompt_pixel_values, atol=1e-4)
|
169 |
+
assert torch.allclose(inputs.prompt_masks[0, :, :3, :3], expected_prompt_masks, atol=1e-4)
|
170 |
+
|
171 |
+
torch.manual_seed(2)
|
172 |
+
outputs = model(**inputs)
|
173 |
+
print(outputs)
|
174 |
+
|
175 |
+
if verify_logits:
|
176 |
+
expected_output = torch.tensor(
|
177 |
+
[
|
178 |
+
[[-2.1208, -2.1190, -2.1198], [-2.1237, -2.1228, -2.1227], [-2.1232, -2.1226, -2.1228]],
|
179 |
+
[[-2.0405, -2.0396, -2.0403], [-2.0434, -2.0434, -2.0433], [-2.0428, -2.0432, -2.0434]],
|
180 |
+
[[-1.8102, -1.8088, -1.8099], [-1.8131, -1.8126, -1.8129], [-1.8130, -1.8128, -1.8131]],
|
181 |
+
]
|
182 |
+
)
|
183 |
+
assert torch.allclose(outputs.pred_masks[0, :, :3, :3], expected_output, atol=1e-4)
|
184 |
+
print("Looks good!")
|
185 |
+
else:
|
186 |
+
print("Converted without verifying logits")
|
187 |
+
|
188 |
+
if pytorch_dump_folder_path is not None:
|
189 |
+
print(f"Saving model and processor for {model_name} to {pytorch_dump_folder_path}")
|
190 |
+
model.save_pretrained(pytorch_dump_folder_path)
|
191 |
+
image_processor.save_pretrained(pytorch_dump_folder_path)
|
192 |
+
|
193 |
+
if push_to_hub:
|
194 |
+
print(f"Pushing model and processor for {model_name} to hub")
|
195 |
+
model.push_to_hub(f"EduardoPacheco/{model_name}")
|
196 |
+
image_processor.push_to_hub(f"EduardoPacheco/{model_name}")
|
197 |
+
|
198 |
+
|
199 |
+
if __name__ == "__main__":
|
200 |
+
parser = argparse.ArgumentParser()
|
201 |
+
# Required parameters
|
202 |
+
parser.add_argument(
|
203 |
+
"--model_name",
|
204 |
+
default="seggpt-vit-large",
|
205 |
+
type=str,
|
206 |
+
choices=["seggpt-vit-large"],
|
207 |
+
help="Name of the SegGpt model you'd like to convert.",
|
208 |
+
)
|
209 |
+
parser.add_argument(
|
210 |
+
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
|
211 |
+
)
|
212 |
+
parser.add_argument(
|
213 |
+
"--verify_logits",
|
214 |
+
action="store_false",
|
215 |
+
help="Whether or not to verify the logits against the original implementation.",
|
216 |
+
)
|
217 |
+
parser.add_argument(
|
218 |
+
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
|
219 |
+
)
|
220 |
+
|
221 |
+
args = parser.parse_args()
|
222 |
+
convert_seggpt_checkpoint(args)
|
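A toy illustration of the renaming helper used by the conversion script above (the keys below are real names from `create_rename_keys`, but the values are placeholders):

```python
def rename_key(dct, old, new):
    # move the value stored under `old` to the new key name
    dct[new] = dct.pop(old)

state_dict = {"patch_embed.proj.weight": "W", "patch_embed.proj.bias": "b"}
rename_key(
    state_dict,
    "patch_embed.proj.weight",
    "model.embeddings.patch_embeddings.projection.weight",
)
print(sorted(state_dict))
```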
llmeval-env/lib/python3.10/site-packages/transformers/models/seggpt/image_processing_seggpt.py
ADDED
@@ -0,0 +1,626 @@
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for SegGPT."""

from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_channel_dimension_axis,
    infer_channel_dimension_format,
    is_scaled_image,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, logging, requires_backends


if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


# See https://arxiv.org/pdf/2212.02499.pdf at 3.1 Redefining Output Spaces as "Images" - Semantic Segmentation from PAINTER paper
# Taken from https://github.com/Abdullah-Meda/Painter/blob/main/Painter/data/coco_semseg/gen_color_coco_panoptic_segm.py#L31
def build_palette(num_labels: int) -> List[Tuple[int, int, int]]:
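    """Build a palette of `num_labels + 1` RGB triplets: black for the background (class index 0)
    plus one evenly spaced color per class.

    A small illustrative check, following directly from the arithmetic below:

    >>> build_palette(2)
    [(0, 0, 0), (255, 255, 255), (255, 255, 127)]
    """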
    base = int(num_labels ** (1 / 3)) + 1
    margin = 256 // base

    # we assume that class_idx 0 is the background which is mapped to black
    color_list = [(0, 0, 0)]
    for location in range(num_labels):
        num_seq_r = location // base**2
        num_seq_g = (location % base**2) // base
        num_seq_b = location % base

        R = 255 - num_seq_r * margin
        G = 255 - num_seq_g * margin
        B = 255 - num_seq_b * margin

        color_list.append((R, G, B))

    return color_list


def get_num_channels(image: np.ndarray, input_data_format: ChannelDimension) -> int:
    if image.ndim == 2:
        return 0

    channel_idx = get_channel_dimension_axis(image, input_data_format)
    return image.shape[channel_idx]


def mask_to_rgb(
    mask: np.ndarray,
    palette: Optional[List[Tuple[int, int, int]]] = None,
    input_data_format: Optional[ChannelDimension] = None,
    data_format: Optional[ChannelDimension] = None,
) -> np.ndarray:
    if input_data_format is None and mask.ndim > 2:
        input_data_format = infer_channel_dimension_format(mask)

    data_format = data_format if data_format is not None else input_data_format

    num_channels = get_num_channels(mask, input_data_format)

    if num_channels == 3:
        return to_channel_dimension_format(mask, data_format, input_data_format) if data_format is not None else mask

    if palette is not None:
        height, width = mask.shape

        rgb_mask = np.zeros((3, height, width), dtype=np.uint8)

        classes_in_mask = np.unique(mask)

        for class_idx in classes_in_mask:
            rgb_value = palette[class_idx]
            class_mask = (mask == class_idx).astype(np.uint8)
            class_mask = np.expand_dims(class_mask, axis=-1)
            class_rgb_mask = class_mask * np.array(rgb_value)
            class_rgb_mask = np.moveaxis(class_rgb_mask, -1, 0)
            rgb_mask += class_rgb_mask.astype(np.uint8)

        rgb_mask = np.clip(rgb_mask, 0, 255).astype(np.uint8)

    else:
        rgb_mask = np.repeat(mask[None, ...], 3, axis=0)

    return (
        to_channel_dimension_format(rgb_mask, data_format, input_data_format) if data_format is not None else rgb_mask
    )


class SegGptImageProcessor(BaseImageProcessor):
    r"""
    Constructs a SegGpt image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `(size["height"],
            size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method.
        size (`dict`, *optional*, defaults to `{"height": 448, "width": 448}`):
            Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
            method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
            `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
            `do_rescale` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
            `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 448, "width": 448}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def get_palette(self, num_labels: int) -> List[Tuple[int, int, int]]:
        """Build a palette to map the prompt mask from a single channel to a 3-channel RGB.

        Args:
            num_labels (`int`):
                Number of classes in the segmentation task (excluding the background).

        Returns:
            `List[Tuple[int, int, int]]`: Palette to map the prompt mask from a single channel to a 3-channel RGB.
        """
        return build_palette(num_labels)

    def mask_to_rgb(
        self,
        image: np.ndarray,
        palette: Optional[List[Tuple[int, int, int]]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Convert a mask to RGB format.

        Args:
            image (`np.ndarray`):
                Mask to convert to RGB format. If the mask is already in RGB format, it will be passed through.
            palette (`List[Tuple[int, int, int]]`, *optional*, defaults to `None`):
                Palette to use to convert the mask to RGB format. If unset, the mask is duplicated across the channel
                dimension.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.

        Returns:
            `np.ndarray`: The mask in RGB format.
        """
        return mask_to_rgb(
            image,
            palette=palette,
            data_format=data_format,
            input_data_format=input_data_format,
        )

    # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image to `(size["height"], size["width"])`.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.

        Returns:
            `np.ndarray`: The resized image.
        """
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(
            image,
            size=output_size,
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    def _preprocess_step(
        self,
        images: ImageInput,
        is_mask: bool = False,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        num_labels: Optional[int] = None,
        **kwargs,
    ):
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            is_mask (`bool`, *optional*, defaults to `False`):
                Whether the image is a mask. If `True`, the image is converted to RGB using the palette if
                `num_labels` is specified; otherwise RGB is obtained by duplicating the channel.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
                resizing.
            resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
                `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BICUBIC`. Only has
                an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values to [0, 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use if `do_normalize` is set to `True`.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use if `do_normalize` is set to `True`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            num_labels (`int`, *optional*):
                Number of classes in the segmentation task (excluding the background). If specified, a palette will be
                built, assuming that class_idx 0 is the background, to map the prompt mask from a single class_idx
                channel to a 3-channel RGB. Not specifying this will result in the prompt mask either being passed
                through as is if it is already in RGB format or being duplicated across the channel dimension.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size_dict = get_size_dict(size)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None and not is_mask:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        if is_mask:
            palette = self.get_palette(num_labels) if num_labels is not None else None
            # Since this is the input for the next transformations, its format should be the same as the input_data_format
            images = [
                self.mask_to_rgb(image=image, palette=palette, data_format=ChannelDimension.FIRST) for image in images
            ]
            input_data_format = ChannelDimension.FIRST

        if do_resize:
            images = [
                self.resize(image=image, size=size_dict, resample=resample, input_data_format=input_data_format)
                for image in images
            ]

        if do_rescale:
            images = [
                self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
                for image in images
            ]

        if do_normalize:
            images = [
                self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
                for image in images
            ]

        images = [
            to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
        ]

        return images

    def preprocess(
        self,
        images: Optional[ImageInput] = None,
        prompt_images: Optional[ImageInput] = None,
        prompt_masks: Optional[ImageInput] = None,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        num_labels: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            prompt_images (`ImageInput`):
                Prompt image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to
                255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            prompt_masks (`ImageInput`):
                Prompt mask from the prompt image to preprocess. Expects a single or batch of masks. If the masks are
                single-channel, they will be converted to RGB using the palette if `num_labels` is specified, or by
                just repeating the channel if not. If the masks are already in RGB format, they will be passed
                through.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
                resizing.
            resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
                `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BICUBIC`. Only has
                an effect if `do_resize` is set to `True`. Doesn't apply to the prompt mask, which is resized with
                nearest-neighbor interpolation.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values to [0, 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use if `do_normalize` is set to `True`.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use if `do_normalize` is set to `True`.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            num_labels (`int`, *optional*):
                Number of classes in the segmentation task (excluding the background). If specified, a palette will be
                built, assuming that class_idx 0 is the background, to map the prompt mask from a single class_idx
                channel to a 3-channel RGB. Not specifying this will result in the prompt mask either being passed
                through as is if it is already in RGB format or being duplicated across the channel dimension.
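
        Examples:

        A minimal sketch with random arrays standing in for real data; the checkpoint name
        `BAAI/seggpt-vit-large` is assumed, and any SegGPT checkpoint shipping this processor
        should behave the same way.

        ```python
        >>> from transformers import SegGptImageProcessor
        >>> import numpy as np

        >>> image_processor = SegGptImageProcessor.from_pretrained("BAAI/seggpt-vit-large")
        >>> image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)
        >>> prompt_image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)
        >>> prompt_mask = np.random.randint(0, 3, (300, 400), dtype=np.uint8)  # background + 2 classes

        >>> inputs = image_processor(
        ...     images=image,
        ...     prompt_images=prompt_image,
        ...     prompt_masks=prompt_mask,
        ...     num_labels=2,
        ...     return_tensors="pt",
        ... )
        >>> inputs["pixel_values"].shape  # resized to the default 448 x 448
        torch.Size([1, 3, 448, 448])
        >>> inputs["prompt_masks"].shape  # the single-channel mask is mapped to 3 channels via the palette
        torch.Size([1, 3, 448, 448])
        ```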
        """
        if all(v is None for v in [images, prompt_images, prompt_masks]):
            raise ValueError("At least one of images, prompt_images, prompt_masks must be specified.")

        data = {}

        if images is not None:
            images = self._preprocess_step(
                images,
                is_mask=False,
                do_resize=do_resize,
                size=size,
                resample=resample,
                do_rescale=do_rescale,
                rescale_factor=rescale_factor,
                do_normalize=do_normalize,
                image_mean=image_mean,
                image_std=image_std,
                data_format=data_format,
                input_data_format=input_data_format,
                **kwargs,
            )

            data["pixel_values"] = images

        if prompt_images is not None:
            prompt_images = self._preprocess_step(
                prompt_images,
                is_mask=False,
                do_resize=do_resize,
                size=size,
                resample=resample,
                do_rescale=do_rescale,
                rescale_factor=rescale_factor,
                do_normalize=do_normalize,
                image_mean=image_mean,
                image_std=image_std,
                data_format=data_format,
                input_data_format=input_data_format,
                **kwargs,
            )

            data["prompt_pixel_values"] = prompt_images

        if prompt_masks is not None:
            prompt_masks = self._preprocess_step(
                prompt_masks,
                is_mask=True,
                do_resize=do_resize,
                size=size,
                resample=PILImageResampling.NEAREST,
                do_rescale=do_rescale,
                rescale_factor=rescale_factor,
                do_normalize=do_normalize,
                image_mean=image_mean,
                image_std=image_std,
                data_format=data_format,
                input_data_format=input_data_format,
                num_labels=num_labels,
                **kwargs,
            )

            data["prompt_masks"] = prompt_masks

        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(
        self, outputs, target_sizes: Optional[List[Tuple[int, int]]] = None, num_labels: Optional[int] = None
    ):
        """
        Converts [`SegGptImageSegmentationOutput`] into segmentation maps. Only supports PyTorch.

        Args:
            outputs ([`SegGptImageSegmentationOutput`]):
                Raw outputs of the model.
            target_sizes (`List[Tuple[int, int]]`, *optional*):
                List of length `batch_size`, where each list item (`Tuple[int, int]`) corresponds to the requested
                final size (height, width) of each prediction. If left as `None`, predictions will not be resized.
            num_labels (`int`, *optional*):
                Number of classes in the segmentation task (excluding the background). If specified, a palette will be
                built, assuming that class_idx 0 is the background, to map prediction masks from RGB values to class
                indices. This value should be the same one used when preprocessing inputs.

        Returns:
            semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic
            segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
            specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
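
        Example:

        A sketch continuing the `preprocess` example above (`inputs` and `image_processor` come
        from there); the model class and checkpoint name refer to the surrounding SegGPT model
        code rather than anything defined in this file.

        ```python
        >>> import torch
        >>> from transformers import SegGptForImageSegmentation

        >>> model = SegGptForImageSegmentation.from_pretrained("BAAI/seggpt-vit-large")
        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        >>> segmentation = image_processor.post_process_semantic_segmentation(
        ...     outputs, target_sizes=[(300, 400)], num_labels=2
        ... )
        >>> segmentation[0].shape  # per-pixel class indices at the requested (height, width)
        torch.Size([300, 400])
        ```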
        """
        requires_backends(self, ["torch"])
        # batch_size x num_channels x 2*height x width
        masks = outputs.pred_masks

        # Predicted mask and prompt are concatenated in the height dimension
        # batch_size x num_channels x height x width
        masks = masks[:, :, masks.shape[2] // 2 :, :]

        # To unnormalize we need to permute to channel last
        # batch_size x height x width x num_channels
        std = torch.tensor(self.image_std).to(masks.device)
        mean = torch.tensor(self.image_mean).to(masks.device)

        masks = masks.permute(0, 2, 3, 1) * std + mean

        # batch_size x num_channels x height x width
        masks = masks.permute(0, 3, 1, 2)

        # Clip to match with palette if specified
        masks = torch.clip(masks * 255, 0, 255)

        semantic_segmentation = []
        palette_tensor = None
        palette = self.get_palette(num_labels) if num_labels is not None else None
        if palette is not None:
            palette_tensor = torch.tensor(palette).float().to(masks.device)
            _, num_channels, _, _ = masks.shape
            palette_tensor = palette_tensor.view(1, 1, num_labels + 1, num_channels)

        for idx, mask in enumerate(masks):
            if target_sizes is not None:
                mask = torch.nn.functional.interpolate(
                    mask.unsqueeze(0),
                    size=target_sizes[idx],
                    mode="nearest",
                )[0]

            if num_labels is not None:
                # Assign each pixel to the class whose palette color is nearest (squared distance in RGB space)
                channels, height, width = mask.shape
                dist = mask.permute(1, 2, 0).view(height, width, 1, channels)
                dist = dist - palette_tensor
                dist = torch.pow(dist, 2)
                dist = torch.sum(dist, dim=-1)
                pred = dist.argmin(dim=-1)

            else:
                # If no palette is specified, SegGpt will try to paint using the mask class idx as RGB
                pred = mask.mean(dim=0).int()

            semantic_segmentation.append(pred)

        return semantic_segmentation