Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__init__.py +112 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/configuration_beit.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/convert_beit_unilm_to_pytorch.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/feature_extraction_beit.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/image_processing_beit.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_beit.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_flax_beit.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/beit/configuration_beit.py +231 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/beit/convert_beit_unilm_to_pytorch.py +374 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/beit/feature_extraction_beit.py +33 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/beit/image_processing_beit.py +531 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/beit/modeling_beit.py +1425 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/beit/modeling_flax_beit.py +948 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/tokenization_clvp.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__init__.py +113 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/configuration_deit.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/convert_deit_timm_to_pytorch.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/feature_extraction_deit.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/image_processing_deit.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/modeling_deit.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/modeling_tf_deit.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deit/configuration_deit.py +142 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deit/convert_deit_timm_to_pytorch.py +219 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deit/feature_extraction_deit.py +33 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deit/image_processing_deit.py +320 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deit/modeling_deit.py +891 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/deit/modeling_tf_deit.py +1178 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/configuration_dinov2.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/convert_dinov2_to_hf.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/modeling_dinov2.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/configuration_dinov2.py +175 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/modeling_dinov2.py +856 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__init__.py +65 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/configuration_gpt_bigcode.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/modeling_gpt_bigcode.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/__init__.py +68 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/configuration_nllb_moe.py +218 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/convert_nllb_moe_sharded_original_checkpoint_to_pytorch.py +160 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/modeling_nllb_moe.py +1792 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__init__.py +65 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/configuration_prophetnet.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/modeling_prophetnet.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/tokenization_prophetnet.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/configuration_prophetnet.py +180 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__init__.py
ADDED
@@ -0,0 +1,112 @@
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
        "BeitBackbone",
    ]


try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitBackbone,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )


else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
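As a side note (not part of this diff), the `_LazyModule` registration at the end of this `__init__.py` means each submodule is imported only when one of its symbols is first accessed. A minimal sketch, assuming the torch and vision extras are installed:

from transformers.models import beit

# Illustrative only: attribute access on the lazily-loaded package triggers
# the import of the submodule that actually defines the symbol.
config_cls = beit.BeitConfig   # imports beit.configuration_beit on first access
model_cls = beit.BeitModel     # imports beit.modeling_beit (requires torch)
print(config_cls.__module__)   # transformers.models.beit.configuration_beit
print(model_cls.__module__)    # transformers.models.beit.modeling_beit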
llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.68 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/configuration_beit.cpython-310.pyc
ADDED
Binary file (10.1 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/convert_beit_unilm_to_pytorch.cpython-310.pyc
ADDED
Binary file (10.9 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/feature_extraction_beit.cpython-310.pyc
ADDED
Binary file (1 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/image_processing_beit.cpython-310.pyc
ADDED
Binary file (18.7 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_beit.cpython-310.pyc
ADDED
Binary file (44.7 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_flax_beit.cpython-310.pyc
ADDED
Binary file (28.3 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/beit/configuration_beit.py
ADDED
@@ -0,0 +1,231 @@
# coding=utf-8
# Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BEiT model configuration"""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class BeitConfig(BackboneConfigMixin, PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`BeitModel`]. It is used to instantiate an BEiT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the BEiT
    [microsoft/beit-base-patch16-224-pt22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k) architecture.

    Args:
        vocab_size (`int`, *optional*, defaults to 8192):
            Vocabulary size of the BEiT model. Defines the number of different image tokens that can be used during
            pre-training.
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        use_mask_token (`bool`, *optional*, defaults to `False`):
            Whether to use a mask token for masked image modeling.
        use_absolute_position_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to use BERT-style absolute position embeddings.
        use_relative_position_bias (`bool`, *optional*, defaults to `False`):
            Whether to use T5-style relative position embeddings in the self-attention layers.
        use_shared_relative_position_bias (`bool`, *optional*, defaults to `False`):
            Whether to use the same relative position embeddings across all self-attention layers of the Transformer.
        layer_scale_init_value (`float`, *optional*, defaults to 0.1):
            Scale to use in the self-attention layers. 0.1 for base, 1e-5 for large. Set 0 to disable layer scale.
        drop_path_rate (`float`, *optional*, defaults to 0.1):
            Stochastic depth rate per sample (when applied in the main path of residual layers).
        use_mean_pooling (`bool`, *optional*, defaults to `True`):
            Whether to mean pool the final hidden states of the patches instead of using the final hidden state of the
            CLS token, before applying the classification head.
        pool_scales (`Tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`):
            Pooling scales used in Pooling Pyramid Module applied on the last feature map.
        use_auxiliary_head (`bool`, *optional*, defaults to `True`):
            Whether to use an auxiliary head during training.
        auxiliary_loss_weight (`float`, *optional*, defaults to 0.4):
            Weight of the cross-entropy loss of the auxiliary head.
        auxiliary_channels (`int`, *optional*, defaults to 256):
            Number of channels to use in the auxiliary head.
        auxiliary_num_convs (`int`, *optional*, defaults to 1):
            Number of convolutional layers to use in the auxiliary head.
        auxiliary_concat_input (`bool`, *optional*, defaults to `False`):
            Whether to concatenate the output of the auxiliary head with the input before the classification layer.
        semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
            The index that is ignored by the loss function of the semantic segmentation model.
        out_features (`List[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`List[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        add_fpn (`bool`, *optional*, defaults to `False`):
            Whether to add a FPN as part of the backbone. Only relevant for [`BeitBackbone`].
        reshape_hidden_states (`bool`, *optional*, defaults to `True`):
            Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
            case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
            seq_len, hidden_size)`. Only relevant for [`BeitBackbone`].

    Example:

    ```python
    >>> from transformers import BeitConfig, BeitModel

    >>> # Initializing a BEiT beit-base-patch16-224-pt22k style configuration
    >>> configuration = BeitConfig()

    >>> # Initializing a model (with random weights) from the beit-base-patch16-224-pt22k style configuration
    >>> model = BeitModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        out_features=None,
        out_indices=None,
        add_fpn=False,
        reshape_hidden_states=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index

        # handle backwards compatibility
        if "segmentation_indices" in kwargs:
            logger.warning(
                "The `segmentation_indices` argument is deprecated and will be removed in a future version, use `out_indices` instead.",
                FutureWarning,
            )
            out_indices = kwargs.pop("segmentation_indices")

        # backbone attributes
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, self.num_hidden_layers + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
        self.add_fpn = add_fpn
        self.reshape_hidden_states = reshape_hidden_states


# Copied from transformers.models.vit.configuration_vit.ViTOnnxConfig
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
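For reference (not part of this diff), a short sketch of how the backbone arguments above behave; the requested stage names are illustrative and follow the `stage_names` attribute built in `__init__`:

from transformers import BeitConfig

# Requesting intermediate feature maps by stage name; the matching indices are
# derived by get_aligned_output_features_output_indices against stage_names
# (["stem", "stage1", ..., "stage12"] for the default 12-layer model).
config = BeitConfig(out_features=["stage3", "stage6", "stage9", "stage12"])
print(config.out_features)  # ['stage3', 'stage6', 'stage9', 'stage12']
print(config.out_indices)   # expected: [3, 6, 9, 12]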
llmeval-env/lib/python3.10/site-packages/transformers/models/beit/convert_beit_unilm_to_pytorch.py
ADDED
@@ -0,0 +1,374 @@
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BEiT checkpoints from the unilm repository."""


import argparse
import json
from pathlib import Path

import requests
import torch
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    BeitConfig,
    BeitForImageClassification,
    BeitForMaskedImageModeling,
    BeitForSemanticSegmentation,
    BeitImageProcessor,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
        ]
    )

    if has_lm_head:
        # mask token + shared relative position bias + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                (
                    "rel_pos_bias.relative_position_bias_table",
                    "beit.encoder.relative_position_bias.relative_position_bias_table",
                ),
                (
                    "rel_pos_bias.relative_position_index",
                    "beit.encoder.relative_position_bias.relative_position_index",
                ),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    elif is_semantic:
        # semantic segmentation classification heads
        rename_keys.extend(
            [
                ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
                ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
                ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
                ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")

        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2

        # relative_position bias table + index
        if not has_lm_head:
            # each layer has its own relative position bias
            table = state_dict.pop(f"{prefix}blocks.{i}.attn.relative_position_bias_table")
            index = state_dict.pop(f"{prefix}blocks.{i}.attn.relative_position_index")

            state_dict[
                f"beit.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_bias_table"
            ] = table
            state_dict[
                f"beit.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_index"
            ] = index


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_beit_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our BEiT structure.
    """

    # define default BEiT configuration
    config = BeitConfig()
    has_lm_head = False
    is_semantic = False
    repo_id = "huggingface/label-files"
    # set config parameters based on URL
    if checkpoint_url[-9:-4] == "pt22k":
        # masked image modeling
        config.use_shared_relative_position_bias = True
        config.use_mask_token = True
        has_lm_head = True
    elif checkpoint_url[-9:-4] == "ft22k":
        # intermediate fine-tuning on ImageNet-22k
        config.use_relative_position_bias = True
        config.num_labels = 21841
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        # this dataset contains 21843 labels but the model only has 21841
        # we delete the classes as mentioned in https://github.com/google-research/big_transfer/issues/18
        del id2label[9205]
        del id2label[15027]
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    elif checkpoint_url[-8:-4] == "to1k":
        # fine-tuning on ImageNet-1k
        config.use_relative_position_bias = True
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        if "384" in checkpoint_url:
            config.image_size = 384
        if "512" in checkpoint_url:
            config.image_size = 512
    elif "ade20k" in checkpoint_url:
        # fine-tuning
        config.use_relative_position_bias = True
        config.num_labels = 150
        filename = "ade20k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.image_size = 640
        is_semantic = True
    else:
        raise ValueError("Checkpoint not supported, URL should either end with 'pt22k', 'ft22k', 'to1k' or 'ade20k'")

    # size of the architecture
    if "base" in checkpoint_url:
        pass
    elif "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        if "ade20k" in checkpoint_url:
            config.image_size = 640
            config.out_indices = [7, 11, 15, 23]
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)
    state_dict = state_dict["model"] if "ade20k" not in checkpoint_url else state_dict["state_dict"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head, is_semantic=is_semantic)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head, is_semantic=is_semantic)
    if is_semantic:
        # add prefix to decoder keys
        for key, val in state_dict.copy().items():
            val = state_dict.pop(key)
            if key.startswith("backbone.fpn"):
                key = key.replace("backbone.fpn", "fpn")
            state_dict[key] = val

    # load HuggingFace model
    if checkpoint_url[-9:-4] == "pt22k":
        model = BeitForMaskedImageModeling(config)
    elif "ade20k" in checkpoint_url:
        model = BeitForSemanticSegmentation(config)
    else:
        model = BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    if is_semantic:
        image_processor = BeitImageProcessor(size=config.image_size, do_center_crop=False)
        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
    else:
        image_processor = BeitImageProcessor(
            size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
        )
        image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = torch.Size([1, 1000])
    if checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k"):
        expected_shape = torch.Size([1, 196, 8192])
    elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k"):
        expected_shape = torch.Size([1, 196, 8192])
    elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft22k"):
        expected_shape = torch.Size([1, 21841])
        expected_logits = torch.tensor([2.2288, 2.4671, 0.7395])
        expected_class_idx = 2397
    elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft22k"):
        expected_shape = torch.Size([1, 21841])
        expected_logits = torch.tensor([1.6881, -0.2787, 0.5901])
        expected_class_idx = 2396
    elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft1k"):
        expected_logits = torch.tensor([0.1241, 0.0798, -0.6569])
        expected_class_idx = 285
    elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft22kto1k"):
        expected_logits = torch.tensor([-1.2385, -1.0987, -1.0108])
        expected_class_idx = 281
    elif checkpoint_url[:-4].endswith("beit_base_patch16_384_pt22k_ft22kto1k"):
        expected_logits = torch.tensor([-1.5303, -0.9484, -0.3147])
        expected_class_idx = 761
    elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft1k"):
        expected_logits = torch.tensor([0.4610, -0.0928, 0.2086])
        expected_class_idx = 761
    elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft22kto1k"):
        expected_logits = torch.tensor([-0.4804, 0.6257, -0.1837])
        expected_class_idx = 761
    elif checkpoint_url[:-4].endswith("beit_large_patch16_384_pt22k_ft22kto1k"):
        expected_logits = torch.tensor([[-0.5122, 0.5117, -0.2113]])
        expected_class_idx = 761
    elif checkpoint_url[:-4].endswith("beit_large_patch16_512_pt22k_ft22kto1k"):
        expected_logits = torch.tensor([-0.3062, 0.7261, 0.4852])
        expected_class_idx = 761
    elif checkpoint_url[:-4].endswith("beit_base_patch16_640_pt22k_ft22ktoade20k"):
        expected_shape = (1, 150, 160, 160)
        expected_logits = torch.tensor(
            [
                [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
            ]
        )
    elif checkpoint_url[:-4].endswith("beit_large_patch16_640_pt22k_ft22ktoade20k"):
        expected_shape = (1, 150, 160, 160)
        expected_logits = torch.tensor(
            [
                [[-4.3305, -2.3049, -3.0161], [-2.9591, -1.5305, -2.2251], [-3.4198, -1.8004, -2.9062]],
                [[-5.8922, -3.7435, -4.3978], [-4.2063, -2.7872, -3.4755], [-4.2791, -3.1874, -4.1681]],
                [[0.9895, 4.3467, 4.7663], [4.2476, 5.6830, 6.1518], [4.5550, 6.2495, 6.5154]],
            ]
        )
    else:
        raise ValueError("Can't verify logits as model is not supported")

    if logits.shape != expected_shape:
        raise ValueError(f"Shape of logits not as expected. {logits.shape=}, {expected_shape=}")
    if not has_lm_head:
        if is_semantic:
            if not torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-3):
                raise ValueError("First elements of logits not as expected")
        else:
            print("Predicted class idx:", logits.argmax(-1).item())

            if not torch.allclose(logits[0, :3], expected_logits, atol=1e-3):
                raise ValueError("First elements of logits not as expected")
            if logits.argmax(-1).item() != expected_class_idx:
                raise ValueError("Predicted class index not as expected")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22kto1k.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_beit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
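For context (not part of this diff), the script above is normally driven through its `argparse` entry point; the equivalent direct call looks roughly like this, with the output folder path being a placeholder:

from transformers.models.beit.convert_beit_unilm_to_pytorch import convert_beit_checkpoint

# Illustrative invocation mirroring the script's default --checkpoint_url;
# it downloads the original checkpoint, converts and verifies it, then saves
# the model and image processor to the (placeholder) output folder.
convert_beit_checkpoint(
    checkpoint_url=(
        "https://conversationhub.blob.core.windows.net/beit-share-public/beit/"
        "beit_base_patch16_224_pt22k_ft22kto1k.pth"
    ),
    pytorch_dump_folder_path="./beit-base-patch16-224",  # placeholder path
)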
llmeval-env/lib/python3.10/site-packages/transformers/models/beit/feature_extraction_beit.py
ADDED
@@ -0,0 +1,33 @@
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature extractor class for BEiT."""

import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
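As a brief illustration (not part of this diff), `BeitFeatureExtractor` is now only a deprecation shim, so instantiating it behaves exactly like `BeitImageProcessor` apart from the warning:

import warnings

from transformers import BeitFeatureExtractor, BeitImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = BeitFeatureExtractor()  # emits the FutureWarning defined above

assert isinstance(extractor, BeitImageProcessor)
print(caught[0].category.__name__)  # FutureWarning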
llmeval-env/lib/python3.10/site-packages/transformers/models/beit/image_processing_beit.py
ADDED
@@ -0,0 +1,531 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
"""Image processor class for Beit."""
|
16 |
+
|
17 |
+
import warnings
|
18 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
19 |
+
|
20 |
+
import numpy as np
|
21 |
+
|
22 |
+
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
|
23 |
+
from ...image_transforms import resize, to_channel_dimension_format
|
24 |
+
from ...image_utils import (
|
25 |
+
IMAGENET_STANDARD_MEAN,
|
26 |
+
IMAGENET_STANDARD_STD,
|
27 |
+
ChannelDimension,
|
28 |
+
ImageInput,
|
29 |
+
PILImageResampling,
|
30 |
+
infer_channel_dimension_format,
|
31 |
+
is_scaled_image,
|
32 |
+
make_list_of_images,
|
33 |
+
to_numpy_array,
|
34 |
+
valid_images,
|
35 |
+
validate_kwargs,
|
36 |
+
validate_preprocess_arguments,
|
37 |
+
)
|
38 |
+
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
|
39 |
+
|
40 |
+
|
41 |
+
if is_vision_available():
|
42 |
+
import PIL
|
43 |
+
|
44 |
+
if is_torch_available():
|
45 |
+
import torch
|
46 |
+
|
47 |
+
|
48 |
+
logger = logging.get_logger(__name__)
|
49 |
+
|
50 |
+
|
51 |
+
class BeitImageProcessor(BaseImageProcessor):
|
52 |
+
r"""
|
53 |
+
Constructs a BEiT image processor.
|
54 |
+
|
55 |
+
Args:
|
56 |
+
do_resize (`bool`, *optional*, defaults to `True`):
|
57 |
+
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
|
58 |
+
`do_resize` parameter in the `preprocess` method.
|
59 |
+
size (`Dict[str, int]` *optional*, defaults to `{"height": 256, "width": 256}`):
|
60 |
+
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
|
61 |
+
method.
|
62 |
+
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
|
63 |
+
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
|
64 |
+
`preprocess` method.
|
65 |
+
do_center_crop (`bool`, *optional*, defaults to `True`):
|
66 |
+
Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
|
67 |
+
is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in the
|
68 |
+
`preprocess` method.
|
69 |
+
crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
|
70 |
+
Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
|
71 |
+
Can be overridden by the `crop_size` parameter in the `preprocess` method.
|
72 |
+
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
|
73 |
+
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
|
74 |
+
`preprocess` method.
|
75 |
+
do_rescale (`bool`, *optional*, defaults to `True`):
|
76 |
+
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
|
77 |
+
parameter in the `preprocess` method.
|
78 |
+
do_normalize (`bool`, *optional*, defaults to `True`):
|
79 |
+
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
|
80 |
+
method.
|
81 |
+
image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
|
82 |
+
The mean to use if normalizing the image. This is a float or list of floats of length of the number of
|
83 |
+
channels of the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
|
84 |
+
image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
|
85 |
+
The standard deviation to use if normalizing the image. This is a float or list of floats of length of the
|
86 |
+
number of channels of the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
|
87 |
+
do_reduce_labels (`bool`, *optional*, defaults to `False`):
|
88 |
+
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
|
89 |
+
used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
|
90 |
+
background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
|
91 |
+
`preprocess` method.
|
92 |
+
"""
|
93 |
+
|
94 |
+
model_input_names = ["pixel_values"]
|
95 |
+
|
96 |
+
def __init__(
|
97 |
+
self,
|
98 |
+
do_resize: bool = True,
|
99 |
+
size: Dict[str, int] = None,
|
100 |
+
resample: PILImageResampling = PILImageResampling.BICUBIC,
|
101 |
+
do_center_crop: bool = True,
|
102 |
+
crop_size: Dict[str, int] = None,
|
103 |
+
rescale_factor: Union[int, float] = 1 / 255,
|
104 |
+
do_rescale: bool = True,
|
105 |
+
do_normalize: bool = True,
|
106 |
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_reduce_labels: bool = False,
        **kwargs,
    ) -> None:
        if "reduce_labels" in kwargs:
            warnings.warn(
                "The `reduce_labels` parameter is deprecated and will be removed in a future version. Please use"
                " `do_reduce_labels` instead.",
                FutureWarning,
            )
            do_reduce_labels = kwargs.pop("reduce_labels")
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.do_reduce_labels = do_reduce_labels
        self._valid_processor_keys = [
            "images",
            "segmentation_maps",
            "do_resize",
            "size",
            "resample",
            "do_center_crop",
            "crop_size",
            "do_rescale",
            "rescale_factor",
            "do_normalize",
            "image_mean",
            "image_std",
            "do_reduce_labels",
            "return_tensors",
            "data_format",
            "input_data_format",
        ]

    @classmethod
    def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
        """
        Overrides the `from_dict` method from the base class to make sure `reduce_labels` is updated if the image
        processor is created using from_dict and kwargs, e.g. `BeitImageProcessor.from_pretrained(checkpoint, reduce_labels=True)`.
        """
        image_processor_dict = image_processor_dict.copy()
        if "reduce_labels" in kwargs:
            image_processor_dict["reduce_labels"] = kwargs.pop("reduce_labels")
        return super().from_dict(image_processor_dict, **kwargs)

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image to (size["height"], size["width"]).

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PIL.Image.BICUBIC`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        size = get_size_dict(size, default_to_square=True, param_name="size")
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` argument must contain `height` and `width` keys. Got {size.keys()}")
        return resize(
            image,
            size=(size["height"], size["width"]),
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    def reduce_label(self, label: ImageInput) -> np.ndarray:
        label = to_numpy_array(label)
        # Avoid using underflow conversion
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
        return label

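    # Worked example for `reduce_label` (descriptive comment added by the editor, not upstream code):
    # an ADE20k-style map [[0, 1], [2, 255]] becomes [[255, 0], [1, 255]] -- the background class 0
    # is mapped to the ignore index 255 and every real class id is shifted down by one.
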
    def _preprocess(
        self,
        image: ImageInput,
        do_reduce_labels: bool = None,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        if do_reduce_labels:
            image = self.reduce_label(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

        if do_center_crop:
            image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)

        return image

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Preprocesses a single image."""
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if is_scaled_image(image) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )
        if input_data_format is None:
            input_data_format = infer_channel_dimension_format(image)
        image = self._preprocess(
            image,
            do_reduce_labels=False,
            do_resize=do_resize,
            size=size,
            resample=resample,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            input_data_format=input_data_format,
        )
        if data_format is not None:
            image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
        return image

    def _preprocess_segmentation_map(
        self,
        segmentation_map: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_reduce_labels: bool = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        """Preprocesses a single segmentation map."""
        # All transformations expect numpy arrays.
        segmentation_map = to_numpy_array(segmentation_map)
        # Add an axis to the segmentation maps for transformations.
        if segmentation_map.ndim == 2:
            segmentation_map = segmentation_map[None, ...]
            added_dimension = True
            input_data_format = ChannelDimension.FIRST
        else:
            added_dimension = False
            if input_data_format is None:
                input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
        segmentation_map = self._preprocess(
            image=segmentation_map,
            do_reduce_labels=do_reduce_labels,
            do_resize=do_resize,
            resample=resample,
            size=size,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_normalize=False,
            do_rescale=False,
            input_data_format=ChannelDimension.FIRST,
        )
        # Remove extra axis if added
        if added_dimension:
            segmentation_map = np.squeeze(segmentation_map, axis=0)
        segmentation_map = segmentation_map.astype(np.int64)
        return segmentation_map

    def __call__(self, images, segmentation_maps=None, **kwargs):
        # Overrides the `__call__` method of the `Preprocessor` class so that the images and segmentation maps can
        # both be passed in as positional arguments.
        return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        segmentation_maps: Optional[ImageInput] = None,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_reduce_labels: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> PIL.Image.Image:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            segmentation_maps (`ImageInput`, *optional*):
                Segmentation maps to preprocess. Expects a single or batch of images with pixel values ranging from 0
                to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image.
            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the image after center crop. If one edge of the image is smaller than `crop_size`, it will be
                padded with zeros and then cropped.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation.
            do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
                Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
                is used for background, and background itself is not included in all classes of a dataset (e.g.
                ADE20k). The background label will be replaced by 255.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                    - Unset: Return a list of `np.ndarray`.
                    - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                    - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                    - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                    - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                    - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                    - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                    - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                    - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True, param_name="size")
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels

        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)

        images = make_list_of_images(images)

        if segmentation_maps is not None:
            segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2)

        if segmentation_maps is not None and not valid_images(segmentation_maps):
            raise ValueError(
                "Invalid segmentation_maps type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        images = [
            self._preprocess_image(
                image=img,
                do_resize=do_resize,
                do_center_crop=do_center_crop,
                do_rescale=do_rescale,
                do_normalize=do_normalize,
                resample=resample,
                size=size,
                rescale_factor=rescale_factor,
                crop_size=crop_size,
                image_mean=image_mean,
                image_std=image_std,
                data_format=data_format,
                input_data_format=input_data_format,
            )
            for img in images
        ]

        data = {"pixel_values": images}

        if segmentation_maps is not None:
            segmentation_maps = [
                self._preprocess_segmentation_map(
                    segmentation_map=segmentation_map,
                    do_reduce_labels=do_reduce_labels,
                    do_resize=do_resize,
                    resample=resample,
                    size=size,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                )
                for segmentation_map in segmentation_maps
            ]
            data["labels"] = segmentation_maps

        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """
        Converts the output of [`BeitForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.

        Args:
            outputs ([`BeitForSemanticSegmentation`]):
                Raw outputs of the model.
            target_sizes (`List[Tuple]` of length `batch_size`, *optional*):
                List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
                predictions will not be resized.

        Returns:
            semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic
            segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
            specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
        """
        # TODO: add support for other frameworks
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
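# --- Usage sketch (editor's illustration, not part of the file above) ----------------------------
# A minimal example of driving this image processor end to end for semantic segmentation, assuming
# a fine-tuned `BeitForSemanticSegmentation` checkpoint and a local PIL image `image` are available:
#
#     from transformers import BeitImageProcessor, BeitForSemanticSegmentation
#
#     image_processor = BeitImageProcessor(do_reduce_labels=True)
#     model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
#     inputs = image_processor(images=image, return_tensors="pt")   # BatchFeature with "pixel_values"
#     outputs = model(**inputs)
#     maps = image_processor.post_process_semantic_segmentation(
#         outputs, target_sizes=[image.size[::-1]]                  # PIL size is (width, height)
#     )
#     maps[0].shape                                                 # (height, width), one class id per pixel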
llmeval-env/lib/python3.10/site-packages/transformers/models/beit/modeling_beit.py
ADDED
@@ -0,0 +1,1425 @@
# coding=utf-8
# Copyright 2021 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch BEiT model."""


import collections.abc
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutput,
    BaseModelOutputWithPooling,
    ImageClassifierOutput,
    MaskedLMOutput,
    SemanticSegmenterOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_beit import BeitConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "BeitConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/beit-base-patch16-224-pt22k"
_EXPECTED_OUTPUT_SHAPE = [1, 197, 768]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/beit-base-patch16-224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


from ..deprecated._archive_maps import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


@dataclass
class BeitModelOutputWithPooling(BaseModelOutputWithPooling):
    """
    Class for outputs of [`BeitModel`].

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
            Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
            *config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
            will be returned.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """


def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


class BeitDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)

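# Note (descriptive comment added by the editor): with drop_prob=0.1 and training=True, each sample's
# residual branch above is zeroed out with probability 0.1, and the surviving samples are divided by
# keep_prob=0.9 (scaled by roughly 1.11), so the expected value of the output matches the input.
# At evaluation time (training=False) the function is the identity.
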
# Based on timm implementation, which can be found here:
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
class BeitEmbeddings(nn.Module):
    """
    Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
    """

    def __init__(self, config: BeitConfig) -> None:
        super().__init__()

        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        if config.use_mask_token:
            self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        else:
            self.mask_token = None
        self.patch_embeddings = BeitPatchEmbeddings(config)
        num_patches = self.patch_embeddings.num_patches
        if config.use_absolute_position_embeddings:
            self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
        else:
            self.position_embeddings = None
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None) -> torch.Tensor:
        embeddings, (patch_height, patch_width) = self.patch_embeddings(
            pixel_values, self.position_embeddings[:, 1:, :] if self.position_embeddings is not None else None
        )
        batch_size, seq_len, _ = embeddings.size()

        if bool_masked_pos is not None:
            mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
            # replace the masked visual tokens by mask_tokens
            w = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
            embeddings = embeddings * (1 - w) + mask_tokens * w

        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        if self.position_embeddings is not None:
            cls_tokens = cls_tokens + self.position_embeddings[:, :1, :]

        embeddings = torch.cat((cls_tokens, embeddings), dim=1)

        embeddings = self.dropout(embeddings)

        return embeddings, (patch_height, patch_width)


class BeitPatchEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer.
    """

    def __init__(self, config):
        super().__init__()
        image_size, patch_size = config.image_size, config.patch_size
        num_channels, hidden_size = config.num_channels, config.hidden_size

        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_patches = num_patches
        self.patch_shape = patch_shape

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)

    def forward(self, pixel_values: torch.Tensor, position_embedding: Optional[torch.Tensor] = None) -> torch.Tensor:
        batch_size, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        embeddings = self.projection(pixel_values)
        patch_height, patch_width = embeddings.shape[2], embeddings.shape[3]

        if position_embedding is not None:
            # interpolate the position embedding to the corresponding size
            position_embedding = position_embedding.view(1, self.patch_shape[0], self.patch_shape[1], -1).permute(
                0, 3, 1, 2
            )
            position_embedding = nn.functional.interpolate(
                position_embedding, size=(patch_height, patch_width), mode="bicubic"
            )
            embeddings = embeddings + position_embedding

        embeddings = embeddings.flatten(2).transpose(1, 2)

        return embeddings, (patch_height, patch_width)


class BeitSelfAttention(nn.Module):
    def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

        if window_size:
            self.relative_position_bias = BeitRelativePositionBias(config, window_size=window_size)
        else:
            self.relative_position_bias = None

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        relative_position_bias: Optional["BeitRelativePositionBias"] = None,
    ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # Add relative position bias if present.
        if self.relative_position_bias is not None:
            attention_scores = attention_scores + self.relative_position_bias().unsqueeze(0)

        # Add shared relative position bias if provided.
        if relative_position_bias is not None:
            attention_scores = attention_scores + relative_position_bias

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs


class BeitSelfOutput(nn.Module):
    """
    The residual connection is defined in BeitLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: BeitConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor, gamma=None) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class BeitAttention(nn.Module):
    def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None:
        super().__init__()
        self.attention = BeitSelfAttention(config, window_size=window_size)
        self.output = BeitSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        relative_position_bias: Optional["BeitRelativePositionBias"] = None,
    ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
        self_outputs = self.attention(hidden_states, head_mask, output_attentions, relative_position_bias)

        attention_output = self.output(self_outputs[0], hidden_states)

        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class BeitIntermediate(nn.Module):
    def __init__(self, config: BeitConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)

        return hidden_states


class BeitOutput(nn.Module):
    def __init__(self, config: BeitConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class BeitLayer(nn.Module):
    """This corresponds to the Block class in the timm implementation."""

    def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None, drop_path_rate: float = 0.0) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = BeitAttention(config, window_size=window_size)
        self.intermediate = BeitIntermediate(config)
        self.output = BeitOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.drop_path = BeitDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        init_values = config.layer_scale_init_value
        if init_values > 0:
            self.lambda_1 = nn.Parameter(init_values * torch.ones((config.hidden_size)), requires_grad=True)
            self.lambda_2 = nn.Parameter(init_values * torch.ones((config.hidden_size)), requires_grad=True)
        else:
            self.lambda_1, self.lambda_2 = None, None

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        relative_position_bias: Optional["BeitRelativePositionBias"] = None,
    ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
        self_attention_outputs = self.attention(
            self.layernorm_before(hidden_states),  # in BEiT, layernorm is applied before self-attention
            head_mask,
            output_attentions=output_attentions,
            relative_position_bias=relative_position_bias,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        # apply lambda_1 if present
        if self.lambda_1 is not None:
            attention_output = self.lambda_1 * attention_output

        # first residual connection
        hidden_states = self.drop_path(attention_output) + hidden_states

        # in BEiT, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)

        layer_output = self.intermediate(layer_output)
        layer_output = self.output(layer_output)

        if self.lambda_2 is not None:
            layer_output = self.lambda_2 * layer_output

        # second residual connection
        layer_output = self.drop_path(layer_output) + hidden_states

        outputs = (layer_output,) + outputs

        return outputs


class BeitRelativePositionBias(nn.Module):
    def __init__(self, config: BeitConfig, window_size: tuple) -> None:
        super().__init__()
        self.window_size = window_size
        self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros(self.num_relative_distance, config.num_attention_heads)
        )  # 2*Wh-1 * 2*Ww-1, nH
        # cls to token & token 2 cls & cls to cls

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(window_size[0])
        coords_w = torch.arange(window_size[1])
        coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij"))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * window_size[1] - 1
        relative_position_index = torch.zeros(
            size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype
        )
        relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        relative_position_index[0, 0:] = self.num_relative_distance - 3
        relative_position_index[0:, 0] = self.num_relative_distance - 2
        relative_position_index[0, 0] = self.num_relative_distance - 1

        self.register_buffer("relative_position_index", relative_position_index, persistent=False)

    def forward(self) -> torch.Tensor:
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1
        )  # Wh*Ww,Wh*Ww,nH

        return relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww


class BeitEncoder(nn.Module):
    def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None:
        super().__init__()
        self.config = config
        if config.use_shared_relative_position_bias:
            self.relative_position_bias = BeitRelativePositionBias(config, window_size=window_size)
        else:
            self.relative_position_bias = None

        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
        self.layer = nn.ModuleList(
            [
                BeitLayer(
                    config,
                    window_size=window_size if config.use_relative_position_bias else None,
                    drop_path_rate=dpr[i],
                )
                for i in range(config.num_hidden_layers)
            ]
        )
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    layer_head_mask,
                    output_attentions,
                )
            else:
                relative_position_bias = (
                    self.relative_position_bias() if self.relative_position_bias is not None else None
                )
                layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions, relative_position_bias)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class BeitPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BeitConfig
    base_model_prefix = "beit"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


BEIT_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`BeitConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

BEIT_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`BeitImageProcessor.__call__`] for details.

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare Beit Model transformer outputting raw hidden-states without any specific head on top.",
    BEIT_START_DOCSTRING,
)
class BeitModel(BeitPreTrainedModel):
    def __init__(self, config: BeitConfig, add_pooling_layer: bool = True) -> None:
        super().__init__(config)
        self.config = config

        self.embeddings = BeitEmbeddings(config)
        self.encoder = BeitEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape)

        self.layernorm = (
            nn.Identity() if config.use_mean_pooling else nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        )
        self.pooler = BeitPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BeitModelOutputWithPooling,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BeitModelOutputWithPooling]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output, (patch_height, patch_width) = self.embeddings(pixel_values, bool_masked_pos)

        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
            return head_outputs + encoder_outputs[1:]

        return BeitModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


class BeitPooler(nn.Module):
    def __init__(self, config: BeitConfig) -> None:
        super().__init__()
        self.layernorm = (
            nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if config.use_mean_pooling else None
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        if self.layernorm is not None:
            # Mean pool the final hidden states of the patch tokens
            patch_tokens = hidden_states[:, 1:, :]
            pooled_output = self.layernorm(patch_tokens.mean(1))
        else:
            # Pool by simply taking the final hidden state of the [CLS] token
            pooled_output = hidden_states[:, 0]

        return pooled_output

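# --- Usage sketch (editor's illustration, not part of the original file) -------------------------
# Extracting features with the bare model, assuming `pixel_values` of shape (batch_size, 3, 224, 224)
# prepared by `BeitImageProcessor`:
#
#     model = BeitModel.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
#     outputs = model(pixel_values)
#     outputs.last_hidden_state.shape   # (batch_size, 197, 768): [CLS] token + 196 patch tokens
#     outputs.pooler_output.shape       # (batch_size, 768): mean of the patch tokens when
#                                       # config.use_mean_pooling is True (see BeitPooler above)
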
730 |
+
@add_start_docstrings(
|
731 |
+
"""Beit Model transformer with a 'language' modeling head on top. BEiT does masked image modeling by predicting
|
732 |
+
visual tokens of a Vector-Quantize Variational Autoencoder (VQ-VAE), whereas other vision models like ViT and DeiT
|
733 |
+
predict RGB pixel values. As a result, this class is incompatible with [`AutoModelForMaskedImageModeling`], so you
|
734 |
+
will need to use [`BeitForMaskedImageModeling`] directly if you wish to do masked image modeling with BEiT.""",
|
735 |
+
BEIT_START_DOCSTRING,
|
736 |
+
)
|
737 |
+
class BeitForMaskedImageModeling(BeitPreTrainedModel):
|
738 |
+
def __init__(self, config: BeitConfig) -> None:
|
739 |
+
super().__init__(config)
|
740 |
+
|
741 |
+
self.num_labels = config.num_labels
|
742 |
+
self.beit = BeitModel(config, add_pooling_layer=False)
|
743 |
+
|
744 |
+
# Classifier head
|
745 |
+
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
746 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)
|
747 |
+
|
748 |
+
# Initialize weights and apply final processing
|
749 |
+
self.post_init()
|
750 |
+
|
751 |
+
@add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
|
752 |
+
@replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
|
753 |
+
def forward(
|
754 |
+
self,
|
755 |
+
pixel_values: Optional[torch.Tensor] = None,
|
756 |
+
bool_masked_pos: Optional[torch.BoolTensor] = None,
|
757 |
+
head_mask: Optional[torch.Tensor] = None,
|
758 |
+
labels: Optional[torch.Tensor] = None,
|
759 |
+
output_attentions: Optional[bool] = None,
|
760 |
+
output_hidden_states: Optional[bool] = None,
|
761 |
+
return_dict: Optional[bool] = None,
|
762 |
+
) -> Union[tuple, MaskedLMOutput]:
|
763 |
+
r"""
|
764 |
+
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
|
765 |
+
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
|
766 |
+
|
767 |
+
labels (`torch.LongTensor`, *optional*):
|
768 |
+
Target visual token ids for the masked patches, as produced by the image tokenizer (a VQ-VAE). Indices
|
769 |
+
should be in `[0, ..., config.vocab_size - 1]` and are compared, at the positions where `bool_masked_pos` is
|
770 |
+
True, against the predicted logits with a cross-entropy loss.
|
771 |
+
|
772 |
+
Returns:
|
773 |
+
|
774 |
+
Examples:
|
775 |
+
|
776 |
+
```python
|
777 |
+
>>> from transformers import AutoImageProcessor, BeitForMaskedImageModeling
|
778 |
+
>>> import torch
|
779 |
+
>>> from PIL import Image
|
780 |
+
>>> import requests
|
781 |
+
|
782 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
783 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
784 |
+
|
785 |
+
>>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
|
786 |
+
>>> model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
|
787 |
+
|
788 |
+
>>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
|
789 |
+
>>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
|
790 |
+
>>> # create random boolean mask of shape (batch_size, num_patches)
|
791 |
+
>>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()
|
792 |
+
|
793 |
+
>>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
|
794 |
+
>>> loss, logits = outputs.loss, outputs.logits
|
795 |
+
>>> list(logits.shape)
|
796 |
+
[1, 196, 8192]
|
797 |
+
```"""
|
798 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
799 |
+
|
800 |
+
outputs = self.beit(
|
801 |
+
pixel_values,
|
802 |
+
bool_masked_pos=bool_masked_pos,
|
803 |
+
head_mask=head_mask,
|
804 |
+
output_attentions=output_attentions,
|
805 |
+
output_hidden_states=output_hidden_states,
|
806 |
+
return_dict=return_dict,
|
807 |
+
)
|
808 |
+
|
809 |
+
sequence_output = outputs[0]
|
810 |
+
sequence_output = self.layernorm(sequence_output)
|
811 |
+
prediction_scores = self.lm_head(sequence_output[:, 1:])
|
812 |
+
|
813 |
+
masked_lm_loss = None
|
814 |
+
if labels is not None:
|
815 |
+
loss_fct = CrossEntropyLoss() # -100 index = padding token
|
816 |
+
masked_lm_loss = loss_fct(prediction_scores[bool_masked_pos], labels)
|
817 |
+
|
818 |
+
if not return_dict:
|
819 |
+
output = (prediction_scores,) + outputs[1:]
|
820 |
+
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
|
821 |
+
|
822 |
+
return MaskedLMOutput(
|
823 |
+
loss=masked_lm_loss,
|
824 |
+
logits=prediction_scores,
|
825 |
+
hidden_states=outputs.hidden_states,
|
826 |
+
attentions=outputs.attentions,
|
827 |
+
)
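# Illustrative sketch (editorial addition, not part of the upstream module): how the boolean patch
# mask selects the predicted visual-token logits that enter the masked-image-modeling cross-entropy.
# The sizes (196 patches, 8192 visual tokens) are assumptions matching the base pre-training setup.
def _masked_im_loss_sketch():
    import torch
    from torch.nn import CrossEntropyLoss

    prediction_scores = torch.randn(1, 196, 8192)            # one logit vector per patch
    bool_masked_pos = torch.randint(0, 2, (1, 196)).bool()   # True where a patch was masked
    num_masked = int(bool_masked_pos.sum())
    labels = torch.randint(0, 8192, (num_masked,))           # VQ-VAE token ids of the masked patches
    return CrossEntropyLoss()(prediction_scores[bool_masked_pos], labels)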
|
828 |
+
|
829 |
+
|
830 |
+
@add_start_docstrings(
|
831 |
+
"""
|
832 |
+
Beit Model transformer with an image classification head on top (a linear layer on top of the average of the final
|
833 |
+
hidden states of the patch tokens) e.g. for ImageNet.
|
834 |
+
""",
|
835 |
+
BEIT_START_DOCSTRING,
|
836 |
+
)
|
837 |
+
class BeitForImageClassification(BeitPreTrainedModel):
|
838 |
+
def __init__(self, config: BeitConfig) -> None:
|
839 |
+
super().__init__(config)
|
840 |
+
|
841 |
+
self.num_labels = config.num_labels
|
842 |
+
self.beit = BeitModel(config, add_pooling_layer=True)
|
843 |
+
|
844 |
+
# Classifier head
|
845 |
+
self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
|
846 |
+
|
847 |
+
# Initialize weights and apply final processing
|
848 |
+
self.post_init()
|
849 |
+
|
850 |
+
@add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
|
851 |
+
@add_code_sample_docstrings(
|
852 |
+
checkpoint=_IMAGE_CLASS_CHECKPOINT,
|
853 |
+
output_type=ImageClassifierOutput,
|
854 |
+
config_class=_CONFIG_FOR_DOC,
|
855 |
+
expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
|
856 |
+
)
|
857 |
+
def forward(
|
858 |
+
self,
|
859 |
+
pixel_values: Optional[torch.Tensor] = None,
|
860 |
+
head_mask: Optional[torch.Tensor] = None,
|
861 |
+
labels: Optional[torch.Tensor] = None,
|
862 |
+
output_attentions: Optional[bool] = None,
|
863 |
+
output_hidden_states: Optional[bool] = None,
|
864 |
+
return_dict: Optional[bool] = None,
|
865 |
+
) -> Union[tuple, ImageClassifierOutput]:
|
866 |
+
r"""
|
867 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
868 |
+
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
|
869 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
870 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
871 |
+
"""
|
872 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
873 |
+
outputs = self.beit(
|
874 |
+
pixel_values,
|
875 |
+
head_mask=head_mask,
|
876 |
+
output_attentions=output_attentions,
|
877 |
+
output_hidden_states=output_hidden_states,
|
878 |
+
return_dict=return_dict,
|
879 |
+
)
|
880 |
+
|
881 |
+
pooled_output = outputs.pooler_output if return_dict else outputs[1]
|
882 |
+
|
883 |
+
logits = self.classifier(pooled_output)
|
884 |
+
|
885 |
+
loss = None
|
886 |
+
if labels is not None:
|
887 |
+
if self.config.problem_type is None:
|
888 |
+
if self.num_labels == 1:
|
889 |
+
self.config.problem_type = "regression"
|
890 |
+
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
|
891 |
+
self.config.problem_type = "single_label_classification"
|
892 |
+
else:
|
893 |
+
self.config.problem_type = "multi_label_classification"
|
894 |
+
|
895 |
+
if self.config.problem_type == "regression":
|
896 |
+
loss_fct = MSELoss()
|
897 |
+
if self.num_labels == 1:
|
898 |
+
loss = loss_fct(logits.squeeze(), labels.squeeze())
|
899 |
+
else:
|
900 |
+
loss = loss_fct(logits, labels)
|
901 |
+
elif self.config.problem_type == "single_label_classification":
|
902 |
+
loss_fct = CrossEntropyLoss()
|
903 |
+
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
|
904 |
+
elif self.config.problem_type == "multi_label_classification":
|
905 |
+
loss_fct = BCEWithLogitsLoss()
|
906 |
+
loss = loss_fct(logits, labels)
|
907 |
+
if not return_dict:
|
908 |
+
output = (logits,) + outputs[2:]
|
909 |
+
return ((loss,) + output) if loss is not None else output
|
910 |
+
|
911 |
+
return ImageClassifierOutput(
|
912 |
+
loss=loss,
|
913 |
+
logits=logits,
|
914 |
+
hidden_states=outputs.hidden_states,
|
915 |
+
attentions=outputs.attentions,
|
916 |
+
)
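# Illustrative sketch (editorial addition, not part of the upstream module): how the classification
# head picks its loss when config.problem_type is unset, following the same rules as the forward above.
def _problem_type_sketch(num_labels, labels):
    import torch

    if num_labels == 1:
        return "regression"                   # MSELoss on squeezed logits and labels
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"  # CrossEntropyLoss over num_labels classes
    return "multi_label_classification"       # BCEWithLogitsLoss on float multi-hot targets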
|
917 |
+
|
918 |
+
|
919 |
+
class BeitConvModule(nn.Module):
|
920 |
+
"""
|
921 |
+
A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
|
922 |
+
layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
|
923 |
+
|
924 |
+
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
|
925 |
+
"""
|
926 |
+
|
927 |
+
def __init__(
|
928 |
+
self,
|
929 |
+
in_channels: int,
|
930 |
+
out_channels: int,
|
931 |
+
kernel_size: Union[int, Tuple[int, int]],
|
932 |
+
padding: Union[int, Tuple[int, int], str] = 0,
|
933 |
+
bias: bool = False,
|
934 |
+
dilation: Union[int, Tuple[int, int]] = 1,
|
935 |
+
) -> None:
|
936 |
+
super().__init__()
|
937 |
+
self.conv = nn.Conv2d(
|
938 |
+
in_channels=in_channels,
|
939 |
+
out_channels=out_channels,
|
940 |
+
kernel_size=kernel_size,
|
941 |
+
padding=padding,
|
942 |
+
bias=bias,
|
943 |
+
dilation=dilation,
|
944 |
+
)
|
945 |
+
self.bn = nn.BatchNorm2d(out_channels)
|
946 |
+
self.activation = nn.ReLU()
|
947 |
+
|
948 |
+
def forward(self, input: torch.Tensor) -> torch.Tensor:
|
949 |
+
output = self.conv(input)
|
950 |
+
output = self.bn(output)
|
951 |
+
output = self.activation(output)
|
952 |
+
|
953 |
+
return output
|
954 |
+
|
955 |
+
|
956 |
+
class BeitPyramidPoolingBlock(nn.Module):
|
957 |
+
def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
|
958 |
+
super().__init__()
|
959 |
+
self.layers = [
|
960 |
+
nn.AdaptiveAvgPool2d(pool_scale),
|
961 |
+
BeitConvModule(in_channels, channels, kernel_size=1),
|
962 |
+
]
|
963 |
+
for i, layer in enumerate(self.layers):
|
964 |
+
self.add_module(str(i), layer)
|
965 |
+
|
966 |
+
def forward(self, input: torch.Tensor) -> torch.Tensor:
|
967 |
+
hidden_state = input
|
968 |
+
for layer in self.layers:
|
969 |
+
hidden_state = layer(hidden_state)
|
970 |
+
return hidden_state
|
971 |
+
|
972 |
+
|
973 |
+
class BeitPyramidPoolingModule(nn.Module):
|
974 |
+
"""
|
975 |
+
Pyramid Pooling Module (PPM) used in PSPNet.
|
976 |
+
|
977 |
+
Args:
|
978 |
+
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
|
979 |
+
Module.
|
980 |
+
in_channels (int): Input channels.
|
981 |
+
channels (int): Channels after modules, before conv_seg.
|
982 |
+
align_corners (bool): align_corners argument of F.interpolate.
|
983 |
+
|
984 |
+
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
|
985 |
+
"""
|
986 |
+
|
987 |
+
def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
|
988 |
+
super().__init__()
|
989 |
+
self.pool_scales = pool_scales
|
990 |
+
self.align_corners = align_corners
|
991 |
+
self.in_channels = in_channels
|
992 |
+
self.channels = channels
|
993 |
+
self.blocks = []
|
994 |
+
for i, pool_scale in enumerate(pool_scales):
|
995 |
+
block = BeitPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
|
996 |
+
self.blocks.append(block)
|
997 |
+
self.add_module(str(i), block)
|
998 |
+
|
999 |
+
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
|
1000 |
+
ppm_outs = []
|
1001 |
+
for ppm in self.blocks:
|
1002 |
+
ppm_out = ppm(x)
|
1003 |
+
upsampled_ppm_out = nn.functional.interpolate(
|
1004 |
+
ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
|
1005 |
+
)
|
1006 |
+
ppm_outs.append(upsampled_ppm_out)
|
1007 |
+
return ppm_outs
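# Illustrative sketch (editorial addition, not part of the upstream module): the pyramid pooling idea,
# omitting the 1x1 BeitConvModule that the real blocks apply after pooling. Shapes are assumptions.
def _ppm_sketch():
    import torch
    import torch.nn as nn

    x = torch.randn(1, 768, 14, 14)
    outs = []
    for scale in (1, 2, 3, 6):                   # typical config.pool_scales
        pooled = nn.AdaptiveAvgPool2d(scale)(x)  # (1, 768, scale, scale)
        outs.append(
            nn.functional.interpolate(pooled, size=x.shape[2:], mode="bilinear", align_corners=False)
        )
    return [o.shape for o in outs]               # all back at (1, 768, 14, 14)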
|
1008 |
+
|
1009 |
+
|
1010 |
+
class BeitUperHead(nn.Module):
|
1011 |
+
"""
|
1012 |
+
Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
|
1013 |
+
[UPerNet](https://arxiv.org/abs/1807.10221).
|
1014 |
+
|
1015 |
+
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
|
1016 |
+
"""
|
1017 |
+
|
1018 |
+
def __init__(self, config: BeitConfig) -> None:
|
1019 |
+
super().__init__()
|
1020 |
+
|
1021 |
+
self.pool_scales = config.pool_scales # e.g. (1, 2, 3, 6)
|
1022 |
+
self.in_channels = [config.hidden_size] * 4 # e.g. [768, 768, 768, 768]
|
1023 |
+
self.channels = config.hidden_size
|
1024 |
+
self.align_corners = False
|
1025 |
+
self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
|
1026 |
+
|
1027 |
+
# PSP Module
|
1028 |
+
self.psp_modules = BeitPyramidPoolingModule(
|
1029 |
+
self.pool_scales,
|
1030 |
+
self.in_channels[-1],
|
1031 |
+
self.channels,
|
1032 |
+
align_corners=self.align_corners,
|
1033 |
+
)
|
1034 |
+
self.bottleneck = BeitConvModule(
|
1035 |
+
self.in_channels[-1] + len(self.pool_scales) * self.channels,
|
1036 |
+
self.channels,
|
1037 |
+
kernel_size=3,
|
1038 |
+
padding=1,
|
1039 |
+
)
|
1040 |
+
# FPN Module
|
1041 |
+
self.lateral_convs = nn.ModuleList()
|
1042 |
+
self.fpn_convs = nn.ModuleList()
|
1043 |
+
for in_channels in self.in_channels[:-1]: # skip the top layer
|
1044 |
+
l_conv = BeitConvModule(in_channels, self.channels, kernel_size=1)
|
1045 |
+
fpn_conv = BeitConvModule(self.channels, self.channels, kernel_size=3, padding=1)
|
1046 |
+
self.lateral_convs.append(l_conv)
|
1047 |
+
self.fpn_convs.append(fpn_conv)
|
1048 |
+
|
1049 |
+
self.fpn_bottleneck = BeitConvModule(
|
1050 |
+
len(self.in_channels) * self.channels,
|
1051 |
+
self.channels,
|
1052 |
+
kernel_size=3,
|
1053 |
+
padding=1,
|
1054 |
+
)
|
1055 |
+
|
1056 |
+
def psp_forward(self, inputs):
|
1057 |
+
x = inputs[-1]
|
1058 |
+
psp_outs = [x]
|
1059 |
+
psp_outs.extend(self.psp_modules(x))
|
1060 |
+
psp_outs = torch.cat(psp_outs, dim=1)
|
1061 |
+
output = self.bottleneck(psp_outs)
|
1062 |
+
|
1063 |
+
return output
|
1064 |
+
|
1065 |
+
def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
|
1066 |
+
# build laterals
|
1067 |
+
laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
|
1068 |
+
|
1069 |
+
laterals.append(self.psp_forward(encoder_hidden_states))
|
1070 |
+
|
1071 |
+
# build top-down path
|
1072 |
+
used_backbone_levels = len(laterals)
|
1073 |
+
for i in range(used_backbone_levels - 1, 0, -1):
|
1074 |
+
prev_shape = laterals[i - 1].shape[2:]
|
1075 |
+
laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
|
1076 |
+
laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
|
1077 |
+
)
|
1078 |
+
|
1079 |
+
# build outputs
|
1080 |
+
fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
|
1081 |
+
# append psp feature
|
1082 |
+
fpn_outs.append(laterals[-1])
|
1083 |
+
|
1084 |
+
for i in range(used_backbone_levels - 1, 0, -1):
|
1085 |
+
fpn_outs[i] = nn.functional.interpolate(
|
1086 |
+
fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
|
1087 |
+
)
|
1088 |
+
fpn_outs = torch.cat(fpn_outs, dim=1)
|
1089 |
+
output = self.fpn_bottleneck(fpn_outs)
|
1090 |
+
output = self.classifier(output)
|
1091 |
+
|
1092 |
+
return output
|
1093 |
+
|
1094 |
+
|
1095 |
+
class BeitFCNHead(nn.Module):
|
1096 |
+
"""
|
1097 |
+
Fully Convolutional Networks for Semantic Segmentation. This head is the implementation of
|
1098 |
+
[FCN](https://arxiv.org/abs/1411.4038).
|
1099 |
+
|
1100 |
+
Args:
|
1101 |
+
config (BeitConfig): Configuration.
|
1102 |
+
in_index (int): Index of the encoder feature map to take as input. Default: 2.
|
1103 |
+
kernel_size (int): The kernel size for convs in the head. Default: 3.
|
1104 |
+
dilation (int): The dilation rate for convs in the head. Default: 1.
|
1105 |
+
|
1106 |
+
|
1107 |
+
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
|
1108 |
+
"""
|
1109 |
+
|
1110 |
+
def __init__(
|
1111 |
+
self, config: BeitConfig, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
|
1112 |
+
) -> None:
|
1113 |
+
super().__init__()
|
1114 |
+
self.in_channels = config.hidden_size
|
1115 |
+
self.channels = config.auxiliary_channels
|
1116 |
+
self.num_convs = config.auxiliary_num_convs
|
1117 |
+
self.concat_input = config.auxiliary_concat_input
|
1118 |
+
self.in_index = in_index
|
1119 |
+
|
1120 |
+
conv_padding = (kernel_size // 2) * dilation
|
1121 |
+
convs = []
|
1122 |
+
convs.append(
|
1123 |
+
BeitConvModule(
|
1124 |
+
self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
|
1125 |
+
)
|
1126 |
+
)
|
1127 |
+
for i in range(self.num_convs - 1):
|
1128 |
+
convs.append(
|
1129 |
+
BeitConvModule(
|
1130 |
+
self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
|
1131 |
+
)
|
1132 |
+
)
|
1133 |
+
if self.num_convs == 0:
|
1134 |
+
self.convs = nn.Identity()
|
1135 |
+
else:
|
1136 |
+
self.convs = nn.Sequential(*convs)
|
1137 |
+
if self.concat_input:
|
1138 |
+
self.conv_cat = BeitConvModule(
|
1139 |
+
self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
|
1140 |
+
)
|
1141 |
+
|
1142 |
+
self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
|
1143 |
+
|
1144 |
+
def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
|
1145 |
+
# just take the relevant feature maps
|
1146 |
+
hidden_states = encoder_hidden_states[self.in_index]
|
1147 |
+
output = self.convs(hidden_states)
|
1148 |
+
if self.concat_input:
|
1149 |
+
output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
|
1150 |
+
output = self.classifier(output)
|
1151 |
+
return output
|
1152 |
+
|
1153 |
+
|
1154 |
+
@add_start_docstrings(
|
1155 |
+
"""
|
1156 |
+
Beit Model transformer with a semantic segmentation head on top e.g. for ADE20k, CityScapes.
|
1157 |
+
""",
|
1158 |
+
BEIT_START_DOCSTRING,
|
1159 |
+
)
|
1160 |
+
class BeitForSemanticSegmentation(BeitPreTrainedModel):
|
1161 |
+
def __init__(self, config: BeitConfig) -> None:
|
1162 |
+
super().__init__(config)
|
1163 |
+
|
1164 |
+
self.num_labels = config.num_labels
|
1165 |
+
self.beit = BeitModel(config, add_pooling_layer=False)
|
1166 |
+
|
1167 |
+
# FPNs
|
1168 |
+
if len(self.config.out_indices) != 4:
|
1169 |
+
raise ValueError(
|
1170 |
+
"BeitForSemanticSegmentation requires config.out_indices to be a list of 4 integers, "
|
1171 |
+
"specifying which features to use from the backbone. One can use [3, 5, 7, 11] in case of "
|
1172 |
+
"a base-sized architecture."
|
1173 |
+
)
|
1174 |
+
self.fpn1 = nn.Sequential(
|
1175 |
+
nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
|
1176 |
+
nn.BatchNorm2d(config.hidden_size),
|
1177 |
+
nn.GELU(),
|
1178 |
+
nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
|
1179 |
+
)
|
1180 |
+
self.fpn2 = nn.Sequential(
|
1181 |
+
nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
|
1182 |
+
)
|
1183 |
+
self.fpn3 = nn.Identity()
|
1184 |
+
self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
|
1185 |
+
|
1186 |
+
# Semantic segmentation head(s)
|
1187 |
+
self.decode_head = BeitUperHead(config)
|
1188 |
+
self.auxiliary_head = BeitFCNHead(config) if config.use_auxiliary_head else None
|
1189 |
+
|
1190 |
+
# Initialize weights and apply final processing
|
1191 |
+
self.post_init()
|
1192 |
+
|
1193 |
+
def compute_loss(self, logits, auxiliary_logits, labels):
|
1194 |
+
# upsample logits to the images' original size
|
1195 |
+
upsampled_logits = nn.functional.interpolate(
|
1196 |
+
logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
|
1197 |
+
)
|
1198 |
+
if auxiliary_logits is not None:
|
1199 |
+
upsampled_auxiliary_logits = nn.functional.interpolate(
|
1200 |
+
auxiliary_logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
|
1201 |
+
)
|
1202 |
+
# compute weighted loss
|
1203 |
+
loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
|
1204 |
+
main_loss = loss_fct(upsampled_logits, labels)
|
1205 |
+
loss = main_loss
|
1206 |
+
if auxiliary_logits is not None:
|
1207 |
+
auxiliary_loss = loss_fct(upsampled_auxiliary_logits, labels)
|
1208 |
+
loss += self.config.auxiliary_loss_weight * auxiliary_loss
|
1209 |
+
|
1210 |
+
return loss
|
1211 |
+
|
1212 |
+
@add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
|
1213 |
+
@replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
|
1214 |
+
def forward(
|
1215 |
+
self,
|
1216 |
+
pixel_values: Optional[torch.Tensor] = None,
|
1217 |
+
head_mask: Optional[torch.Tensor] = None,
|
1218 |
+
labels: Optional[torch.Tensor] = None,
|
1219 |
+
output_attentions: Optional[bool] = None,
|
1220 |
+
output_hidden_states: Optional[bool] = None,
|
1221 |
+
return_dict: Optional[bool] = None,
|
1222 |
+
) -> Union[tuple, SemanticSegmenterOutput]:
|
1223 |
+
r"""
|
1224 |
+
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
|
1225 |
+
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
|
1226 |
+
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
|
1227 |
+
|
1228 |
+
Returns:
|
1229 |
+
|
1230 |
+
Examples:
|
1231 |
+
|
1232 |
+
```python
|
1233 |
+
>>> from transformers import AutoImageProcessor, BeitForSemanticSegmentation
|
1234 |
+
>>> from PIL import Image
|
1235 |
+
>>> import requests
|
1236 |
+
|
1237 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
1238 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
1239 |
+
|
1240 |
+
>>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
|
1241 |
+
>>> model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
|
1242 |
+
|
1243 |
+
>>> inputs = image_processor(images=image, return_tensors="pt")
|
1244 |
+
>>> outputs = model(**inputs)
|
1245 |
+
>>> # logits are of shape (batch_size, num_labels, height, width)
|
1246 |
+
>>> logits = outputs.logits
|
1247 |
+
```"""
|
1248 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1249 |
+
output_hidden_states = (
|
1250 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1251 |
+
)
|
1252 |
+
|
1253 |
+
outputs = self.beit(
|
1254 |
+
pixel_values,
|
1255 |
+
head_mask=head_mask,
|
1256 |
+
output_attentions=output_attentions,
|
1257 |
+
output_hidden_states=True, # we need the intermediate hidden states
|
1258 |
+
return_dict=return_dict,
|
1259 |
+
)
|
1260 |
+
|
1261 |
+
encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
|
1262 |
+
|
1263 |
+
# only keep certain features, and reshape
|
1264 |
+
# note that we do +1 as the encoder_hidden_states also includes the initial embeddings
|
1265 |
+
features = [feature for idx, feature in enumerate(encoder_hidden_states) if idx + 1 in self.config.out_indices]
|
1266 |
+
batch_size = pixel_values.shape[0]
|
1267 |
+
patch_resolution = self.config.image_size // self.config.patch_size
|
1268 |
+
features = [
|
1269 |
+
x[:, 1:, :].permute(0, 2, 1).reshape(batch_size, -1, patch_resolution, patch_resolution) for x in features
|
1270 |
+
]
|
1271 |
+
|
1272 |
+
# apply FPNs
|
1273 |
+
ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
|
1274 |
+
for i in range(len(features)):
|
1275 |
+
features[i] = ops[i](features[i])
|
1276 |
+
|
1277 |
+
logits = self.decode_head(features)
|
1278 |
+
|
1279 |
+
auxiliary_logits = None
|
1280 |
+
if self.auxiliary_head is not None:
|
1281 |
+
auxiliary_logits = self.auxiliary_head(features)
|
1282 |
+
|
1283 |
+
loss = None
|
1284 |
+
if labels is not None:
|
1285 |
+
if self.config.num_labels == 1:
|
1286 |
+
raise ValueError("The number of labels should be greater than one")
|
1287 |
+
else:
|
1288 |
+
loss = self.compute_loss(logits, auxiliary_logits, labels)
|
1289 |
+
|
1290 |
+
if not return_dict:
|
1291 |
+
if output_hidden_states:
|
1292 |
+
output = (logits,) + outputs[1:]
|
1293 |
+
else:
|
1294 |
+
output = (logits,) + outputs[2:]
|
1295 |
+
return ((loss,) + output) if loss is not None else output
|
1296 |
+
|
1297 |
+
return SemanticSegmenterOutput(
|
1298 |
+
loss=loss,
|
1299 |
+
logits=logits,
|
1300 |
+
hidden_states=outputs.hidden_states if output_hidden_states else None,
|
1301 |
+
attentions=outputs.attentions,
|
1302 |
+
)
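# Illustrative sketch (editorial addition, not part of the upstream module): how compute_loss above
# combines the decode-head and auxiliary-head losses after upsampling both logit maps to the label
# resolution. The shapes, the ignore index 255 and the 0.4 auxiliary weight are assumptions.
def _segmentation_loss_sketch():
    import torch
    from torch.nn import CrossEntropyLoss

    logits = torch.randn(1, 150, 160, 160)            # decode head output (e.g. 150 ADE20k classes)
    auxiliary_logits = torch.randn(1, 150, 160, 160)  # auxiliary FCN head output
    labels = torch.randint(0, 150, (1, 640, 640))     # ground-truth maps at image resolution

    def upsample(t):
        return torch.nn.functional.interpolate(t, size=labels.shape[-2:], mode="bilinear", align_corners=False)

    loss_fct = CrossEntropyLoss(ignore_index=255)
    return loss_fct(upsample(logits), labels) + 0.4 * loss_fct(upsample(auxiliary_logits), labels)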
|
1303 |
+
|
1304 |
+
|
1305 |
+
@add_start_docstrings(
|
1306 |
+
"""
|
1307 |
+
BEiT backbone, to be used with frameworks like DETR and MaskFormer.
|
1308 |
+
""",
|
1309 |
+
BEIT_START_DOCSTRING,
|
1310 |
+
)
|
1311 |
+
class BeitBackbone(BeitPreTrainedModel, BackboneMixin):
|
1312 |
+
def __init__(self, config):
|
1313 |
+
super().__init__(config)
|
1314 |
+
super()._init_backbone(config)
|
1315 |
+
|
1316 |
+
self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
|
1317 |
+
self.embeddings = BeitEmbeddings(config)
|
1318 |
+
self.encoder = BeitEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape)
|
1319 |
+
|
1320 |
+
if config.add_fpn:
|
1321 |
+
if len(self.config.out_indices) != 4:
|
1322 |
+
raise ValueError(
|
1323 |
+
"BeitBackbone requires config.out_indices to be a list of 4 integers, "
|
1324 |
+
"specifying which features to use from the backbone. One can use [3, 5, 7, 11] in case of "
|
1325 |
+
"a base-sized architecture."
|
1326 |
+
)
|
1327 |
+
hidden_size = config.hidden_size
|
1328 |
+
self.fpn1 = nn.Sequential(
|
1329 |
+
nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2),
|
1330 |
+
nn.BatchNorm2d(hidden_size, eps=config.batch_norm_eps),
|
1331 |
+
nn.GELU(),
|
1332 |
+
nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2),
|
1333 |
+
)
|
1334 |
+
|
1335 |
+
self.fpn2 = nn.Sequential(nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2))
|
1336 |
+
self.fpn3 = nn.Identity()
|
1337 |
+
self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
|
1338 |
+
|
1339 |
+
# initialize weights and apply final processing
|
1340 |
+
self.post_init()
|
1341 |
+
|
1342 |
+
def get_input_embeddings(self):
|
1343 |
+
return self.embeddings.patch_embeddings
|
1344 |
+
|
1345 |
+
@add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
|
1346 |
+
@replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
|
1347 |
+
def forward(
|
1348 |
+
self,
|
1349 |
+
pixel_values: Tensor,
|
1350 |
+
output_hidden_states: Optional[bool] = None,
|
1351 |
+
output_attentions: Optional[bool] = None,
|
1352 |
+
return_dict: Optional[bool] = None,
|
1353 |
+
) -> BackboneOutput:
|
1354 |
+
"""
|
1355 |
+
Returns:
|
1356 |
+
|
1357 |
+
Examples:
|
1358 |
+
|
1359 |
+
```python
|
1360 |
+
>>> from transformers import AutoImageProcessor, AutoBackbone
|
1361 |
+
>>> import torch
|
1362 |
+
>>> from PIL import Image
|
1363 |
+
>>> import requests
|
1364 |
+
|
1365 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
1366 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
1367 |
+
|
1368 |
+
>>> processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
|
1369 |
+
>>> model = AutoBackbone.from_pretrained(
|
1370 |
+
... "microsoft/beit-base-patch16-224", out_features=["stage1", "stage2", "stage3", "stage4"]
|
1371 |
+
... )
|
1372 |
+
|
1373 |
+
>>> inputs = processor(image, return_tensors="pt")
|
1374 |
+
|
1375 |
+
>>> outputs = model(**inputs)
|
1376 |
+
>>> feature_maps = outputs.feature_maps
|
1377 |
+
>>> list(feature_maps[-1].shape)
|
1378 |
+
[1, 768, 14, 14]
|
1379 |
+
```"""
|
1380 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1381 |
+
output_hidden_states = (
|
1382 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1383 |
+
)
|
1384 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
1385 |
+
|
1386 |
+
batch_size = pixel_values.shape[0]
|
1387 |
+
embedding_output, (patch_height, patch_width) = self.embeddings(pixel_values)
|
1388 |
+
|
1389 |
+
outputs = self.encoder(
|
1390 |
+
embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict
|
1391 |
+
)
|
1392 |
+
|
1393 |
+
hidden_states = outputs.hidden_states if return_dict else outputs[1]
|
1394 |
+
|
1395 |
+
feature_maps = ()
|
1396 |
+
for stage, hidden_state in zip(self.stage_names, hidden_states):
|
1397 |
+
if stage in self.out_features:
|
1398 |
+
if self.config.reshape_hidden_states:
|
1399 |
+
hidden_state = hidden_state[:, 1:, :]
|
1400 |
+
hidden_state = hidden_state.permute(0, 2, 1)
|
1401 |
+
hidden_state = hidden_state.reshape(batch_size, -1, patch_height, patch_width)
|
1402 |
+
|
1403 |
+
feature_maps += (hidden_state,)
|
1404 |
+
|
1405 |
+
if self.config.add_fpn:
|
1406 |
+
feature_maps = [
|
1407 |
+
self.fpn1(feature_maps[0]),
|
1408 |
+
self.fpn2(feature_maps[1]),
|
1409 |
+
self.fpn3(feature_maps[2]),
|
1410 |
+
self.fpn4(feature_maps[3]),
|
1411 |
+
]
|
1412 |
+
feature_maps = tuple(feature_maps)
|
1413 |
+
|
1414 |
+
if not return_dict:
|
1415 |
+
if output_hidden_states:
|
1416 |
+
output = (feature_maps,) + outputs[1:]
|
1417 |
+
else:
|
1418 |
+
output = (feature_maps,) + outputs[2:]
|
1419 |
+
return output
|
1420 |
+
|
1421 |
+
return BackboneOutput(
|
1422 |
+
feature_maps=feature_maps,
|
1423 |
+
hidden_states=outputs.hidden_states if output_hidden_states else None,
|
1424 |
+
attentions=outputs.attentions,
|
1425 |
+
)
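# Illustrative sketch (editorial addition, not part of the upstream module): reshaping a transformer
# output of shape (batch, 1 + num_patches, hidden) into a (batch, hidden, h, w) feature map, as done
# above when config.reshape_hidden_states is True. The 14x14 patch grid is an assumption (224/16).
def _reshape_hidden_state_sketch():
    import torch

    hidden_state = torch.randn(2, 1 + 14 * 14, 768)
    hidden_state = hidden_state[:, 1:, :]                              # drop the [CLS] token
    feature_map = hidden_state.permute(0, 2, 1).reshape(2, -1, 14, 14)
    return feature_map.shape                                           # torch.Size([2, 768, 14, 14])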
|
llmeval-env/lib/python3.10/site-packages/transformers/models/beit/modeling_flax_beit.py
ADDED
@@ -0,0 +1,948 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2021 Microsoft Research and the HuggingFace Inc. team.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
|
16 |
+
|
17 |
+
from typing import Callable, List, Optional, Tuple
|
18 |
+
|
19 |
+
import flax
|
20 |
+
import flax.linen as nn
|
21 |
+
import jax
|
22 |
+
import jax.numpy as jnp
|
23 |
+
import numpy as np
|
24 |
+
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
|
25 |
+
from flax.linen.attention import dot_product_attention_weights
|
26 |
+
from flax.traverse_util import flatten_dict, unflatten_dict
|
27 |
+
|
28 |
+
from ...modeling_flax_outputs import (
|
29 |
+
FlaxBaseModelOutput,
|
30 |
+
FlaxBaseModelOutputWithPooling,
|
31 |
+
FlaxMaskedLMOutput,
|
32 |
+
FlaxSequenceClassifierOutput,
|
33 |
+
)
|
34 |
+
from ...modeling_flax_utils import (
|
35 |
+
ACT2FN,
|
36 |
+
FlaxPreTrainedModel,
|
37 |
+
append_replace_return_docstrings,
|
38 |
+
overwrite_call_docstring,
|
39 |
+
)
|
40 |
+
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward
|
41 |
+
from .configuration_beit import BeitConfig
|
42 |
+
|
43 |
+
|
44 |
+
@flax.struct.dataclass
|
45 |
+
class FlaxBeitModelOutputWithPooling(FlaxBaseModelOutputWithPooling):
|
46 |
+
"""
|
47 |
+
Class for outputs of [`FlaxBeitModel`].
|
48 |
+
|
49 |
+
Args:
|
50 |
+
last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
|
51 |
+
Sequence of hidden-states at the output of the last layer of the model.
|
52 |
+
pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`):
|
53 |
+
Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
|
54 |
+
*config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
|
55 |
+
will be returned.
|
56 |
+
hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
57 |
+
Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
|
58 |
+
`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus
|
59 |
+
the initial embedding outputs.
|
60 |
+
attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
|
61 |
+
Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
|
62 |
+
sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
|
63 |
+
the self-attention heads.
|
64 |
+
"""
|
65 |
+
|
66 |
+
|
67 |
+
BEIT_START_DOCSTRING = r"""
|
68 |
+
|
69 |
+
This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
|
70 |
+
library implements for all its model (such as downloading, saving and converting weights from PyTorch models)
|
71 |
+
|
72 |
+
This model is also a
|
73 |
+
[flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
|
74 |
+
a regular Flax Linen Module and refer to the Flax documentation for all matters related to general usage and
|
75 |
+
behavior.
|
76 |
+
|
77 |
+
Finally, this model supports inherent JAX features such as:
|
78 |
+
|
79 |
+
- [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
|
80 |
+
- [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
|
81 |
+
- [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
|
82 |
+
- [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
|
83 |
+
|
84 |
+
Parameters:
|
85 |
+
config ([`BeitConfig`]): Model configuration class with all the parameters of the model.
|
86 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
87 |
+
configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
|
88 |
+
dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
|
89 |
+
The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
|
90 |
+
`jax.numpy.bfloat16` (on TPUs).
|
91 |
+
|
92 |
+
This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
|
93 |
+
specified all the computation will be performed with the given `dtype`.
|
94 |
+
|
95 |
+
**Note that this only specifies the dtype of the computation and does not influence the dtype of model
|
96 |
+
parameters.**
|
97 |
+
|
98 |
+
If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
|
99 |
+
[`~FlaxPreTrainedModel.to_bf16`].
|
100 |
+
"""
|
101 |
+
|
102 |
+
BEIT_INPUTS_DOCSTRING = r"""
|
103 |
+
Args:
|
104 |
+
pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`):
|
105 |
+
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
|
106 |
+
[`AutoImageProcessor.__call__`] for details.
|
107 |
+
|
108 |
+
output_attentions (`bool`, *optional*):
|
109 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
110 |
+
tensors for more detail.
|
111 |
+
output_hidden_states (`bool`, *optional*):
|
112 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
113 |
+
more detail.
|
114 |
+
return_dict (`bool`, *optional*):
|
115 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
116 |
+
"""
|
117 |
+
|
118 |
+
|
119 |
+
def relative_position_index_init(window_size: Tuple[int, int]) -> jnp.ndarray:
|
120 |
+
"""
|
121 |
+
get pair-wise relative position index for each token inside the window
|
122 |
+
"""
|
123 |
+
num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
|
124 |
+
|
125 |
+
coords_h = np.arange(window_size[0])
|
126 |
+
coords_w = np.arange(window_size[1])
|
127 |
+
coords = np.stack(np.meshgrid(coords_h, coords_w, indexing="ij")) # 2, Wh, Ww
|
128 |
+
coords_flatten = np.reshape(coords, (2, -1))
|
129 |
+
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
|
130 |
+
relative_coords = np.transpose(relative_coords, (1, 2, 0)) # Wh*Ww, Wh*Ww, 2
|
131 |
+
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
|
132 |
+
relative_coords[:, :, 1] += window_size[1] - 1
|
133 |
+
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
|
134 |
+
|
135 |
+
relative_position_index = np.zeros(shape=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
|
136 |
+
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
|
137 |
+
relative_position_index[0, 0:] = num_relative_distance - 3
|
138 |
+
relative_position_index[0:, 0] = num_relative_distance - 2
|
139 |
+
relative_position_index[0, 0] = num_relative_distance - 1
|
140 |
+
return jnp.array(relative_position_index)
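# Illustrative sketch (editorial addition, not part of the upstream module): the relative position
# index produced by the helper above for a tiny 2x2 window; the "+ 3" extra distances are reserved
# for the cls-to-token, token-to-cls and cls-to-cls cases.
def _relative_position_index_sketch():
    import numpy as np

    window_size = (2, 2)
    index = np.asarray(relative_position_index_init(window_size))
    num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
    return index.shape, num_relative_distance  # ((5, 5), 12): 4 patch tokens + 1 [CLS] token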
|
141 |
+
|
142 |
+
|
143 |
+
def ones_with_scale(key, shape, scale, dtype=jnp.float32):
|
144 |
+
return jnp.ones(shape, dtype) * scale
|
145 |
+
|
146 |
+
|
147 |
+
class FlaxBeitDropPath(nn.Module):
|
148 |
+
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
|
149 |
+
|
150 |
+
rate: float
|
151 |
+
|
152 |
+
@nn.module.compact
|
153 |
+
def __call__(self, inputs, deterministic: Optional[bool] = True):
|
154 |
+
if self.rate == 0.0:
|
155 |
+
return inputs
|
156 |
+
keep_prob = 1.0 - self.rate
|
157 |
+
if deterministic:
|
158 |
+
return inputs
|
159 |
+
else:
|
160 |
+
shape = (inputs.shape[0],) + (1,) * (inputs.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
|
161 |
+
rng = self.make_rng("droppath")
|
162 |
+
random_tensor = keep_prob + jax.random.uniform(rng, shape=shape, dtype=inputs.dtype)
|
163 |
+
binary_tensor = jnp.floor(random_tensor)
|
164 |
+
output = inputs / keep_prob * binary_tensor
|
165 |
+
return output
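# Illustrative sketch (editorial addition, not part of the upstream module): the stochastic-depth
# mask used by FlaxBeitDropPath, with one keep/drop decision per sample and a 1/keep_prob rescale
# so the expected output is unchanged. The shapes and rate are assumptions.
def _drop_path_sketch():
    import jax
    import jax.numpy as jnp

    rate = 0.1
    keep_prob = 1.0 - rate
    inputs = jnp.ones((4, 16, 8))                          # (batch, tokens, hidden)
    shape = (inputs.shape[0],) + (1,) * (inputs.ndim - 1)  # broadcast over tokens and hidden
    random_tensor = keep_prob + jax.random.uniform(jax.random.PRNGKey(0), shape, dtype=inputs.dtype)
    binary_tensor = jnp.floor(random_tensor)               # 1.0 with prob keep_prob, else 0.0
    return inputs / keep_prob * binary_tensor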
|
166 |
+
|
167 |
+
|
168 |
+
class FlaxBeitPatchEmbeddings(nn.Module):
|
169 |
+
config: BeitConfig
|
170 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
171 |
+
|
172 |
+
def setup(self):
|
173 |
+
self.num_channels = self.config.num_channels
|
174 |
+
image_size = self.config.image_size
|
175 |
+
patch_size = self.config.patch_size
|
176 |
+
num_patches = (image_size // patch_size) * (image_size // patch_size)
|
177 |
+
patch_shape = (image_size // patch_size, image_size // patch_size)
|
178 |
+
self.num_patches = num_patches
|
179 |
+
self.patch_shape = patch_shape
|
180 |
+
self.projection = nn.Conv(
|
181 |
+
self.config.hidden_size,
|
182 |
+
kernel_size=(patch_size, patch_size),
|
183 |
+
strides=(patch_size, patch_size),
|
184 |
+
padding="VALID",
|
185 |
+
dtype=self.dtype,
|
186 |
+
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
|
187 |
+
)
|
188 |
+
|
189 |
+
def __call__(self, pixel_values):
|
190 |
+
num_channels = pixel_values.shape[-1]
|
191 |
+
if num_channels != self.num_channels:
|
192 |
+
raise ValueError(
|
193 |
+
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
|
194 |
+
)
|
195 |
+
embeddings = self.projection(pixel_values)
|
196 |
+
batch_size, _, _, channels = embeddings.shape
|
197 |
+
return jnp.reshape(embeddings, (batch_size, -1, channels))
|
198 |
+
|
199 |
+
|
200 |
+
class FlaxBeitEmbeddings(nn.Module):
|
201 |
+
"""Construct the CLS token, position and patch embeddings."""
|
202 |
+
|
203 |
+
config: BeitConfig
|
204 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
205 |
+
|
206 |
+
def setup(self):
|
207 |
+
self.cls_token = self.param("cls_token", nn.initializers.zeros, (1, 1, self.config.hidden_size))
|
208 |
+
if self.config.use_mask_token:
|
209 |
+
self.mask_token = self.param("mask_token", nn.initializers.zeros, (1, 1, self.config.hidden_size))
|
210 |
+
self.patch_embeddings = FlaxBeitPatchEmbeddings(self.config, dtype=self.dtype)
|
211 |
+
num_patches = self.patch_embeddings.num_patches
|
212 |
+
if self.config.use_absolute_position_embeddings:
|
213 |
+
self.position_embeddings = self.param(
|
214 |
+
"position_embeddings", nn.initializers.zeros, (1, num_patches + 1, self.config.hidden_size)
|
215 |
+
)
|
216 |
+
self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
|
217 |
+
|
218 |
+
def __call__(self, pixel_values, bool_masked_pos=None, deterministic=True):
|
219 |
+
embeddings = self.patch_embeddings(pixel_values)
|
220 |
+
batch_size, seq_len, _ = embeddings.shape
|
221 |
+
|
222 |
+
cls_tokens = jnp.broadcast_to(self.cls_token, (batch_size, 1, self.config.hidden_size))
|
223 |
+
cls_tokens = cls_tokens.astype(embeddings.dtype)
|
224 |
+
|
225 |
+
if bool_masked_pos is not None:
|
226 |
+
mask_tokens = jnp.broadcast_to(self.mask_token, (batch_size, seq_len, self.config.hidden_size))
|
227 |
+
mask_tokens = mask_tokens.astype(embeddings.dtype)
|
228 |
+
# replace the masked visual tokens by mask_tokens
|
229 |
+
w = jnp.expand_dims(bool_masked_pos, axis=-1)
|
230 |
+
embeddings = embeddings * (1 - w) + mask_tokens * w
|
231 |
+
|
232 |
+
embeddings = jnp.concatenate((cls_tokens, embeddings), axis=1)
|
233 |
+
|
234 |
+
if self.config.use_absolute_position_embeddings:
|
235 |
+
embeddings = embeddings + self.position_embeddings.astype(embeddings.dtype)
|
236 |
+
|
237 |
+
embeddings = self.dropout(embeddings, deterministic=deterministic)
|
238 |
+
return embeddings
|
239 |
+
|
240 |
+
|
241 |
+
class FlaxBeitRelativePositionBias(nn.Module):
|
242 |
+
config: BeitConfig
|
243 |
+
window_size: Tuple[int, int]
|
244 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
245 |
+
|
246 |
+
def setup(self):
|
247 |
+
num_relative_distance = (2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1) + 3
|
248 |
+
self.relative_position_bias_table = self.param(
|
249 |
+
"relative_position_bias_table",
|
250 |
+
nn.initializers.zeros,
|
251 |
+
(num_relative_distance, self.config.num_attention_heads),
|
252 |
+
) # 2*Wh-1 * 2*Ww-1, nH
|
253 |
+
# cls to token & token to cls & cls to cls
|
254 |
+
|
255 |
+
self.relative_position_index = relative_position_index_init(self.window_size)
|
256 |
+
|
257 |
+
def __call__(self):
|
258 |
+
index = self.relative_position_index.reshape(-1)
|
259 |
+
shape = (self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1)
|
260 |
+
relative_position_bias = self.relative_position_bias_table[index].reshape(shape) # Wh*Ww,Wh*Ww,nH
|
261 |
+
return jnp.transpose(relative_position_bias, (2, 0, 1))
|
262 |
+
|
263 |
+
|
264 |
+
class FlaxBeitSelfAttention(nn.Module):
|
265 |
+
config: BeitConfig
|
266 |
+
window_size: Tuple[int, int]
|
267 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
268 |
+
|
269 |
+
def setup(self):
|
270 |
+
if self.config.hidden_size % self.config.num_attention_heads != 0 and not hasattr(
|
271 |
+
self.config, "embedding_size"
|
272 |
+
):
|
273 |
+
raise ValueError(
|
274 |
+
f"The hidden size {self.config.hidden_size,} is not a multiple of the number of attention "
|
275 |
+
f"heads {self.config.num_attention_heads}."
|
276 |
+
)
|
277 |
+
|
278 |
+
self.query = nn.Dense(
|
279 |
+
self.config.hidden_size,
|
280 |
+
dtype=self.dtype,
|
281 |
+
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
|
282 |
+
)
|
283 |
+
self.key = nn.Dense(
|
284 |
+
self.config.hidden_size,
|
285 |
+
dtype=self.dtype,
|
286 |
+
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
|
287 |
+
use_bias=False,
|
288 |
+
)
|
289 |
+
self.value = nn.Dense(
|
290 |
+
self.config.hidden_size,
|
291 |
+
dtype=self.dtype,
|
292 |
+
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
|
293 |
+
)
|
294 |
+
|
295 |
+
self.relative_position_bias = (
|
296 |
+
FlaxBeitRelativePositionBias(self.config, window_size=self.window_size, dtype=self.dtype)
|
297 |
+
if self.window_size
|
298 |
+
else None
|
299 |
+
)
|
300 |
+
|
301 |
+
def __call__(
|
302 |
+
self, hidden_states, relative_position_bias=None, deterministic: bool = True, output_attentions: bool = False
|
303 |
+
):
|
304 |
+
head_dim = self.config.hidden_size // self.config.num_attention_heads
|
305 |
+
|
306 |
+
query_states = self.query(hidden_states).reshape(
|
307 |
+
hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
|
308 |
+
)
|
309 |
+
value_states = self.value(hidden_states).reshape(
|
310 |
+
hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
|
311 |
+
)
|
312 |
+
key_states = self.key(hidden_states).reshape(
|
313 |
+
hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
|
314 |
+
)
|
315 |
+
|
316 |
+
dropout_rng = None
|
317 |
+
if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
|
318 |
+
dropout_rng = self.make_rng("dropout")
|
319 |
+
|
320 |
+
attention_bias = jnp.array(0.0, dtype=self.dtype)
|
321 |
+
# Add relative position bias if present.
|
322 |
+
if self.relative_position_bias is not None:
|
323 |
+
attention_bias = jnp.expand_dims(self.relative_position_bias(), 0)
|
324 |
+
attention_bias = attention_bias.astype(query_states.dtype)
|
325 |
+
|
326 |
+
# Add shared relative position bias if provided.
|
327 |
+
if relative_position_bias is not None:
|
328 |
+
attention_bias = attention_bias + relative_position_bias.astype(attention_bias.dtype)
|
329 |
+
|
330 |
+
attn_weights = dot_product_attention_weights(
|
331 |
+
query_states,
|
332 |
+
key_states,
|
333 |
+
bias=attention_bias,
|
334 |
+
dropout_rng=dropout_rng,
|
335 |
+
dropout_rate=self.config.attention_probs_dropout_prob,
|
336 |
+
broadcast_dropout=True,
|
337 |
+
deterministic=deterministic,
|
338 |
+
dtype=self.dtype,
|
339 |
+
precision=None,
|
340 |
+
)
|
341 |
+
|
342 |
+
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
|
343 |
+
attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
|
344 |
+
|
345 |
+
outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
|
346 |
+
return outputs
|
347 |
+
|
348 |
+
|
349 |
+
class FlaxBeitSelfOutput(nn.Module):
|
350 |
+
config: BeitConfig
|
351 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
352 |
+
|
353 |
+
def setup(self):
|
354 |
+
self.dense = nn.Dense(
|
355 |
+
self.config.hidden_size,
|
356 |
+
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
|
357 |
+
dtype=self.dtype,
|
358 |
+
)
|
359 |
+
self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
|
360 |
+
|
361 |
+
def __call__(self, hidden_states, deterministic: bool = True):
|
362 |
+
hidden_states = self.dense(hidden_states)
|
363 |
+
hidden_states = self.dropout(hidden_states, deterministic=deterministic)
|
364 |
+
return hidden_states
|
365 |
+
|
366 |
+
|
367 |
+
class FlaxBeitAttention(nn.Module):
|
368 |
+
config: BeitConfig
|
369 |
+
window_size: Tuple[int, int]
|
370 |
+
dtype: jnp.dtype = jnp.float32
|
371 |
+
|
372 |
+
def setup(self):
|
373 |
+
self.attention = FlaxBeitSelfAttention(self.config, self.window_size, dtype=self.dtype)
|
374 |
+
self.output = FlaxBeitSelfOutput(self.config, dtype=self.dtype)
|
375 |
+
|
376 |
+
def __call__(
|
377 |
+
self, hidden_states, relative_position_bias=None, deterministic=True, output_attentions: bool = False
|
378 |
+
):
|
379 |
+
attn_outputs = self.attention(
|
380 |
+
hidden_states, relative_position_bias, deterministic=deterministic, output_attentions=output_attentions
|
381 |
+
)
|
382 |
+
attn_output = attn_outputs[0]
|
383 |
+
attn_output = self.output(attn_output, deterministic=deterministic)
|
384 |
+
|
385 |
+
outputs = (attn_output,)
|
386 |
+
|
387 |
+
if output_attentions:
|
388 |
+
outputs += (attn_outputs[1],)
|
389 |
+
|
390 |
+
return outputs
|
391 |
+
|
392 |
+
|
393 |
+
class FlaxBeitIntermediate(nn.Module):
|
394 |
+
config: BeitConfig
|
395 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
396 |
+
|
397 |
+
def setup(self):
|
398 |
+
self.dense = nn.Dense(
|
399 |
+
self.config.intermediate_size,
|
400 |
+
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
|
401 |
+
dtype=self.dtype,
|
402 |
+
)
|
403 |
+
self.activation = ACT2FN[self.config.hidden_act]
|
404 |
+
|
405 |
+
def __call__(self, hidden_states):
|
406 |
+
hidden_states = self.dense(hidden_states)
|
407 |
+
hidden_states = self.activation(hidden_states)
|
408 |
+
|
409 |
+
return hidden_states
|
410 |
+
|
411 |
+
|
412 |
+
class FlaxBeitOutput(nn.Module):
|
413 |
+
config: BeitConfig
|
414 |
+
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
|
415 |
+
|
416 |
+
def setup(self):
|
417 |
+
self.dense = nn.Dense(
|
418 |
+
self.config.hidden_size,
|
419 |
+
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
|
420 |
+
dtype=self.dtype,
|
421 |
+
)
|
422 |
+
self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
|
423 |
+
|
424 |
+
    def __call__(self, hidden_states, deterministic: bool = True):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)

        return hidden_states


class FlaxBeitLayer(nn.Module):
    config: BeitConfig
    window_size: Tuple[int, int]
    drop_path_rate: float
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.attention = FlaxBeitAttention(self.config, self.window_size, dtype=self.dtype)
        self.intermediate = FlaxBeitIntermediate(self.config, dtype=self.dtype)
        self.output = FlaxBeitOutput(self.config, dtype=self.dtype)
        self.layernorm_before = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.drop_path = FlaxBeitDropPath(rate=self.drop_path_rate)
        self.layernorm_after = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)

        self.init_values = self.config.layer_scale_init_value
        if self.init_values > 0:
            self.lambda_1 = self.param("lambda_1", ones_with_scale, (self.config.hidden_size), self.init_values)
            self.lambda_2 = self.param("lambda_2", ones_with_scale, (self.config.hidden_size), self.init_values)
        else:
            self.lambda_1 = None
            self.lambda_2 = None

    def __call__(
        self, hidden_states, relative_position_bias=None, deterministic: bool = True, output_attentions: bool = False
    ):
        self_attention_outputs = self.attention(
            self.layernorm_before(hidden_states),  # in BEiT, layernorm is applied before self-attention
            relative_position_bias,
            deterministic=deterministic,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]

        # apply lambda_1 if present
        if self.lambda_1 is not None:
            attention_output = self.lambda_1.astype(attention_output.dtype) * attention_output

        # first residual connection
        hidden_states = self.drop_path(attention_output, deterministic=deterministic) + hidden_states

        # in BEiT, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)

        layer_output = self.intermediate(layer_output)
        layer_output = self.output(layer_output, deterministic=deterministic)

        # apply lambda_2 if present
        if self.lambda_2 is not None:
            layer_output = self.lambda_2.astype(layer_output.dtype) * layer_output

        # second residual connection
        layer_output = self.drop_path(layer_output, deterministic=deterministic) + hidden_states

        outputs = (layer_output,)

        if output_attentions:
            outputs += (self_attention_outputs[1],)

        return outputs


class FlaxBeitLayerCollection(nn.Module):
    config: BeitConfig
    window_size: Tuple[int, int]
    drop_path_rates: List[float]
    relative_position_bias: Callable[[], jnp.ndarray]
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.layers = [
            FlaxBeitLayer(
                self.config,
                window_size=self.window_size if self.config.use_relative_position_bias else None,
                drop_path_rate=self.drop_path_rates[i],
                name=str(i),
                dtype=self.dtype,
            )
            for i in range(self.config.num_hidden_layers)
        ]

    def __call__(
        self,
        hidden_states,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        all_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None

        for i, layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            relative_position_bias = self.relative_position_bias() if self.relative_position_bias is not None else None
            layer_outputs = layer(
                hidden_states, relative_position_bias, deterministic=deterministic, output_attentions=output_attentions
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions += (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        outputs = (hidden_states,)
        if not return_dict:
            return tuple(v for v in outputs if v is not None)

        return FlaxBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )


class FlaxBeitEncoder(nn.Module):
    config: BeitConfig
    window_size: Tuple[int, int]
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        if self.config.use_shared_relative_position_bias:
            self.relative_position_bias = FlaxBeitRelativePositionBias(
                config=self.config, window_size=self.window_size, dtype=self.dtype
            )

        # stochastic depth decay rule
        drop_path_rates = list(np.linspace(0, self.config.drop_path_rate, self.config.num_hidden_layers))
        self.layer = FlaxBeitLayerCollection(
            self.config,
            window_size=self.window_size,
            drop_path_rates=drop_path_rates,
            relative_position_bias=self.relative_position_bias
            if self.config.use_shared_relative_position_bias
            else None,
            dtype=self.dtype,
        )

    def __call__(
        self,
        hidden_states,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        return self.layer(
            hidden_states,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )


class FlaxBeitPreTrainedModel(FlaxPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BeitConfig
    base_model_prefix = "beit"
    main_input_name = "pixel_values"
    module_class: nn.Module = None

    def __init__(
        self,
        config: BeitConfig,
        input_shape=None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        if input_shape is None:
            input_shape = (1, config.image_size, config.image_size, config.num_channels)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensors
        pixel_values = jnp.zeros(input_shape, dtype=self.dtype)

        params_rng, dropout_rng = jax.random.split(rng)
        dropout_rng, droppath_rng = jax.random.split(dropout_rng)
        rngs = {"params": params_rng, "dropout": dropout_rng, "droppath": droppath_rng}

        random_params = self.module.init(rngs, pixel_values, return_dict=False)["params"]

        if params is not None:
            random_params = flatten_dict(unfreeze(random_params))
            params = flatten_dict(unfreeze(params))
            for missing_key in self._missing_keys:
                params[missing_key] = random_params[missing_key]
            self._missing_keys = set()
            return freeze(unflatten_dict(params))
        else:
            return random_params

    @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def __call__(
        self,
        pixel_values,
        bool_masked_pos=None,
        params: dict = None,
        dropout_rng: jax.random.PRNGKey = None,
        train: bool = False,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))
        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            dropout_rng, droppath_rng = jax.random.split(dropout_rng)
            rngs["dropout"] = dropout_rng
            rngs["droppath"] = droppath_rng

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(pixel_values, dtype=jnp.float32),
            bool_masked_pos,
            not train,
            output_attentions,
            output_hidden_states,
            return_dict,
            rngs=rngs,
        )


class FlaxBeitPooler(nn.Module):
    config: BeitConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        if self.config.use_mean_pooling:
            self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)

    def __call__(self, hidden_states):
        if self.config.use_mean_pooling:
            # Mean pool the final hidden states of the patch tokens
            patch_tokens = hidden_states[:, 1:, :]
            pooled_output = self.layernorm(jnp.mean(patch_tokens, axis=1))
        else:
            # Pool by simply taking the final hidden state of the [CLS] token
            pooled_output = hidden_states[:, 0]

        return pooled_output


class FlaxBeitModule(nn.Module):
    config: BeitConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    add_pooling_layer: bool = True

    def setup(self):
        self.embeddings = FlaxBeitEmbeddings(self.config, dtype=self.dtype)
        self.encoder = FlaxBeitEncoder(
            self.config, window_size=self.embeddings.patch_embeddings.patch_shape, dtype=self.dtype
        )
        if not self.config.use_mean_pooling:
            self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.pooler = FlaxBeitPooler(self.config, dtype=self.dtype) if self.add_pooling_layer else None

    def __call__(
        self,
        pixel_values,
        bool_masked_pos=None,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        hidden_states = self.embeddings(pixel_values, bool_masked_pos, deterministic=deterministic)

        outputs = self.encoder(
            hidden_states,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = outputs[0]
        if not self.config.use_mean_pooling:
            hidden_states = self.layernorm(hidden_states)
        pooled = self.pooler(hidden_states) if self.add_pooling_layer else None

        if not return_dict:
            # if pooled is None, don't return it
            if pooled is None:
                return (hidden_states,) + outputs[1:]
            return (hidden_states, pooled) + outputs[1:]

        return FlaxBeitModelOutputWithPooling(
            last_hidden_state=hidden_states,
            pooler_output=pooled,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    "The bare Beit Model transformer outputting raw hidden-states without any specific head on top.",
    BEIT_START_DOCSTRING,
)
class FlaxBeitModel(FlaxBeitPreTrainedModel):
    module_class = FlaxBeitModule


FLAX_BEIT_MODEL_DOCSTRING = """
    Returns:

    Examples:

    ```python
    >>> from transformers import AutoImageProcessor, FlaxBeitModel
    >>> from PIL import Image
    >>> import requests

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)

    >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224-pt22k-ft22k")
    >>> model = FlaxBeitModel.from_pretrained("microsoft/beit-base-patch16-224-pt22k-ft22k")

    >>> inputs = image_processor(images=image, return_tensors="np")
    >>> outputs = model(**inputs)
    >>> last_hidden_states = outputs.last_hidden_state
    ```
"""

overwrite_call_docstring(FlaxBeitModel, FLAX_BEIT_MODEL_DOCSTRING)
append_replace_return_docstrings(FlaxBeitModel, output_type=FlaxBeitModelOutputWithPooling, config_class=BeitConfig)


class FlaxBeitForMaskedImageModelingModule(nn.Module):
    config: BeitConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.beit = FlaxBeitModule(self.config, add_pooling_layer=False, dtype=self.dtype)

        # Classifier head
        self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.lm_head = nn.Dense(
            self.config.vocab_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )

    def __call__(
        self,
        pixel_values=None,
        bool_masked_pos=None,
        deterministic: bool = True,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.beit(
            pixel_values,
            bool_masked_pos,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        sequence_output = self.layernorm(sequence_output)
        prediction_scores = self.lm_head(sequence_output[:, 1:])

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return output

        return FlaxMaskedLMOutput(
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    "Beit Model transformer with a 'language' modeling head on top (to predict visual tokens).",
    BEIT_START_DOCSTRING,
)
class FlaxBeitForMaskedImageModeling(FlaxBeitPreTrainedModel):
    module_class = FlaxBeitForMaskedImageModelingModule


FLAX_BEIT_MLM_DOCSTRING = """
    bool_masked_pos (`numpy.ndarray` of shape `(batch_size, num_patches)`):
        Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).

    Returns:

    Examples:

    ```python
    >>> from transformers import AutoImageProcessor, BeitForMaskedImageModeling
    >>> from PIL import Image
    >>> import requests

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)

    >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
    >>> model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

    >>> inputs = image_processor(images=image, return_tensors="np")
    >>> outputs = model(**inputs)
    >>> logits = outputs.logits
    ```
"""

overwrite_call_docstring(FlaxBeitForMaskedImageModeling, FLAX_BEIT_MLM_DOCSTRING)
append_replace_return_docstrings(
    FlaxBeitForMaskedImageModeling, output_type=FlaxMaskedLMOutput, config_class=BeitConfig
)


class FlaxBeitForImageClassificationModule(nn.Module):
    config: BeitConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.beit = FlaxBeitModule(config=self.config, dtype=self.dtype, add_pooling_layer=True)
        self.classifier = nn.Dense(
            self.config.num_labels,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )

    def __call__(
        self,
        pixel_values=None,
        bool_masked_pos=None,
        deterministic: bool = True,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.beit(
            pixel_values,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]
        logits = self.classifier(pooled_output)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return output

        return FlaxSequenceClassifierOutput(
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    Beit Model transformer with an image classification head on top (a linear layer on top of the average of the final
    hidden states of the patch tokens) e.g. for ImageNet.
    """,
    BEIT_START_DOCSTRING,
)
class FlaxBeitForImageClassification(FlaxBeitPreTrainedModel):
    module_class = FlaxBeitForImageClassificationModule


FLAX_BEIT_CLASSIF_DOCSTRING = """
    Returns:

    Example:

    ```python
    >>> from transformers import AutoImageProcessor, FlaxBeitForImageClassification
    >>> from PIL import Image
    >>> import requests

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)

    >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
    >>> model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

    >>> inputs = image_processor(images=image, return_tensors="np")
    >>> outputs = model(**inputs)
    >>> logits = outputs.logits
    >>> # model predicts one of the 1000 ImageNet classes
    >>> predicted_class_idx = logits.argmax(-1).item()
    >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
    ```
"""

overwrite_call_docstring(FlaxBeitForImageClassification, FLAX_BEIT_CLASSIF_DOCSTRING)
append_replace_return_docstrings(
    FlaxBeitForImageClassification, output_type=FlaxSequenceClassifierOutput, config_class=BeitConfig
)
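The masked-image-modeling head above takes an optional `bool_masked_pos` array that the docstring example does not exercise. A minimal sketch of passing such a mask, assuming a local transformers + Flax install; the random 40% mask and the zero dummy image are illustrative assumptions, not part of the file above:

```python
import numpy as np
from transformers import FlaxBeitForMaskedImageModeling

model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
num_patches = (model.config.image_size // model.config.patch_size) ** 2
# mark roughly 40% of the patch positions as masked (1 = masked, 0 = visible)
bool_masked_pos = (np.random.rand(1, num_patches) < 0.4).astype("i4")
# dummy image batch in channels-first layout, as expected by __call__ above
pixel_values = np.zeros((1, 3, model.config.image_size, model.config.image_size), dtype=np.float32)
outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
print(outputs.logits.shape)  # (1, num_patches, vocab_size): scores over the visual-token vocabulary
```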
llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/tokenization_clvp.cpython-310.pyc
ADDED
Binary file (12.8 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__init__.py
ADDED
@@ -0,0 +1,113 @@
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )


else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
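The `_LazyModule` indirection above defers the submodule imports until one of the exported names is first touched; from user code the import looks flat. A small usage sketch, assuming torch is installed so the PyTorch branch is available:

```python
from transformers import DeiTConfig, DeiTModel

config = DeiTConfig(image_size=224, patch_size=16)
model = DeiTModel(config)  # first access triggers the lazy import of modeling_deit
print(type(model).__name__)  # DeiTModel
```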
llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.72 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/configuration_deit.cpython-310.pyc
ADDED
Binary file (5.42 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/convert_deit_timm_to_pytorch.cpython-310.pyc
ADDED
Binary file (6.19 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/feature_extraction_deit.cpython-310.pyc
ADDED
Binary file (1 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/image_processing_deit.cpython-310.pyc
ADDED
Binary file (13 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/modeling_deit.cpython-310.pyc
ADDED
Binary file (28.8 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/modeling_tf_deit.cpython-310.pyc
ADDED
Binary file (36.8 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/deit/configuration_deit.py
ADDED
@@ -0,0 +1,142 @@
# coding=utf-8
# Copyright 2021 Facebook AI Research (FAIR) and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" DeiT model configuration"""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class DeiTConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DeiTModel`]. It is used to instantiate an DeiT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the DeiT
    [facebook/deit-base-distilled-patch16-224](https://huggingface.co/facebook/deit-base-distilled-patch16-224)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        encoder_stride (`int`, *optional*, defaults to 16):
            Factor to increase the spatial resolution by in the decoder head for masked image modeling.

    Example:

    ```python
    >>> from transformers import DeiTConfig, DeiTModel

    >>> # Initializing a DeiT deit-base-distilled-patch16-224 style configuration
    >>> configuration = DeiTConfig()

    >>> # Initializing a model (with random weights) from the deit-base-distilled-patch16-224 style configuration
    >>> model = DeiTModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
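`DeiTOnnxConfig` only declares the dynamic axes of `pixel_values` and a validation tolerance for the ONNX export tooling. A short sketch of instantiating it from a model config and inspecting those values (the direct construction here is an illustrative assumption; it is normally driven by the export machinery):

```python
from transformers import DeiTConfig, DeiTOnnxConfig

config = DeiTConfig()
onnx_config = DeiTOnnxConfig(config)
print(onnx_config.inputs)               # OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])
print(onnx_config.atol_for_validation)  # 1e-4
```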
llmeval-env/lib/python3.10/site-packages/transformers/models/deit/convert_deit_timm_to_pytorch.py
ADDED
@@ -0,0 +1,219 @@
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert DeiT distilled checkpoints from the timm library."""


import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )

        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our DeiT structure.
    """

    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
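For reference, a rough sketch of driving the conversion entry point defined above programmatically rather than via the CLI; it assumes the script is importable as `convert_deit_timm_to_pytorch` on the current path and that timm plus network access are available, and the output directory name is a placeholder:

```python
from pathlib import Path

from convert_deit_timm_to_pytorch import convert_deit_checkpoint  # assumed import path for the script above

dump_dir = Path("deit-base-distilled-patch16-224")  # placeholder output directory
dump_dir.mkdir(exist_ok=True)
# uses the script's default timm checkpoint name; downloads timm weights and the ImageNet label file
convert_deit_checkpoint("vit_deit_base_distilled_patch16_224", str(dump_dir))
```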
llmeval-env/lib/python3.10/site-packages/transformers/models/deit/feature_extraction_deit.py
ADDED
@@ -0,0 +1,33 @@
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature extractor class for DeiT."""

import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
llmeval-env/lib/python3.10/site-packages/transformers/models/deit/image_processing_deit.py
ADDED
@@ -0,0 +1,320 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
"""Image processor class for DeiT."""
|
16 |
+
|
17 |
+
from typing import Dict, List, Optional, Union
|
18 |
+
|
19 |
+
import numpy as np
|
20 |
+
|
21 |
+
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
|
22 |
+
from ...image_transforms import resize, to_channel_dimension_format
|
23 |
+
from ...image_utils import (
|
24 |
+
IMAGENET_STANDARD_MEAN,
|
25 |
+
IMAGENET_STANDARD_STD,
|
26 |
+
ChannelDimension,
|
27 |
+
ImageInput,
|
28 |
+
PILImageResampling,
|
29 |
+
infer_channel_dimension_format,
|
30 |
+
is_scaled_image,
|
31 |
+
make_list_of_images,
|
32 |
+
to_numpy_array,
|
33 |
+
valid_images,
|
34 |
+
validate_kwargs,
|
35 |
+
validate_preprocess_arguments,
|
36 |
+
)
|
37 |
+
from ...utils import TensorType, is_vision_available, logging
|
38 |
+
|
39 |
+
|
40 |
+
if is_vision_available():
|
41 |
+
import PIL
|
42 |
+
|
43 |
+
|
44 |
+
logger = logging.get_logger(__name__)
|
45 |
+
|
46 |
+
|
47 |
+
class DeiTImageProcessor(BaseImageProcessor):
|
48 |
+
r"""
|
49 |
+
Constructs a DeiT image processor.
|
50 |
+
|
51 |
+
Args:
|
52 |
+
do_resize (`bool`, *optional*, defaults to `True`):
|
53 |
+
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
|
54 |
+
`do_resize` in `preprocess`.
|
55 |
+
size (`Dict[str, int]` *optional*, defaults to `{"height": 256, "width": 256}`):
|
56 |
+
Size of the image after `resize`. Can be overridden by `size` in `preprocess`.
|
57 |
+
resample (`PILImageResampling` filter, *optional*, defaults to `Resampling.BICUBIC`):
|
58 |
+
Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
|
59 |
+
do_center_crop (`bool`, *optional*, defaults to `True`):
|
60 |
+
Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
|
61 |
+
is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in `preprocess`.
|
62 |
+
crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
|
63 |
+
Desired output size when applying center-cropping. Can be overridden by `crop_size` in `preprocess`.
|
64 |
+
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
|
65 |
+
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
|
66 |
+
`preprocess` method.
|
67 |
+
do_rescale (`bool`, *optional*, defaults to `True`):
|
68 |
+
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
|
69 |
+
parameter in the `preprocess` method.
|
70 |
+
do_normalize (`bool`, *optional*, defaults to `True`):
|
71 |
+
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
|
72 |
+
method.
|
73 |
+
image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
|
74 |
+
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
|
75 |
+
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
|
76 |
+
image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
|
77 |
+
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
|
78 |
+
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
|
79 |
+
"""
|
80 |
+
|
81 |
+
model_input_names = ["pixel_values"]
|
82 |
+
|
83 |
+
def __init__(
|
84 |
+
self,
|
85 |
+
do_resize: bool = True,
|
86 |
+
size: Dict[str, int] = None,
|
87 |
+
resample: PILImageResampling = PIL.Image.BICUBIC,
|
88 |
+
do_center_crop: bool = True,
|
89 |
+
crop_size: Dict[str, int] = None,
|
90 |
+
rescale_factor: Union[int, float] = 1 / 255,
|
91 |
+
do_rescale: bool = True,
|
92 |
+
do_normalize: bool = True,
|
93 |
+
image_mean: Optional[Union[float, List[float]]] = None,
|
94 |
+
image_std: Optional[Union[float, List[float]]] = None,
|
95 |
+
**kwargs,
|
96 |
+
) -> None:
|
97 |
+
super().__init__(**kwargs)
|
98 |
+
size = size if size is not None else {"height": 256, "width": 256}
|
99 |
+
size = get_size_dict(size)
|
100 |
+
crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
|
101 |
+
crop_size = get_size_dict(crop_size, param_name="crop_size")
|
102 |
+
|
103 |
+
self.do_resize = do_resize
|
104 |
+
self.size = size
|
105 |
+
self.resample = resample
|
106 |
+
self.do_center_crop = do_center_crop
|
107 |
+
self.crop_size = crop_size
|
108 |
+
self.do_rescale = do_rescale
|
109 |
+
self.rescale_factor = rescale_factor
|
110 |
+
self.do_normalize = do_normalize
|
111 |
+
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
|
112 |
+
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
|
113 |
+
self._valid_processor_keys = [
|
114 |
+
"images",
|
115 |
+
"do_resize",
|
116 |
+
"size",
|
117 |
+
"resample",
|
118 |
+
"do_center_crop",
|
119 |
+
"crop_size",
|
120 |
+
"do_rescale",
|
121 |
+
"rescale_factor",
|
122 |
+
"do_normalize",
|
123 |
+
"image_mean",
|
124 |
+
"image_std",
|
125 |
+
"return_tensors",
|
126 |
+
"data_format",
|
127 |
+
"input_data_format",
|
128 |
+
]
|
129 |
+
|
130 |
+
# Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC
|
131 |
+
def resize(
|
132 |
+
self,
|
133 |
+
image: np.ndarray,
|
134 |
+
size: Dict[str, int],
|
135 |
+
resample: PILImageResampling = PILImageResampling.BICUBIC,
|
136 |
+
data_format: Optional[Union[str, ChannelDimension]] = None,
|
137 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
138 |
+
**kwargs,
|
139 |
+
) -> np.ndarray:
|
140 |
+
"""
|
141 |
+
Resize an image to `(size["height"], size["width"])`.
|
142 |
+
|
143 |
+
Args:
|
144 |
+
image (`np.ndarray`):
|
145 |
+
Image to resize.
|
146 |
+
size (`Dict[str, int]`):
|
147 |
+
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
|
148 |
+
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
|
149 |
+
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
|
150 |
+
data_format (`ChannelDimension` or `str`, *optional*):
|
151 |
+
The channel dimension format for the output image. If unset, the channel dimension format of the input
|
152 |
+
image is used. Can be one of:
|
153 |
+
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
154 |
+
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
155 |
+
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
|
156 |
+
input_data_format (`ChannelDimension` or `str`, *optional*):
|
157 |
+
The channel dimension format for the input image. If unset, the channel dimension format is inferred
|
158 |
+
from the input image. Can be one of:
|
159 |
+
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
160 |
+
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
161 |
+
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
|
162 |
+
|
163 |
+
Returns:
|
164 |
+
`np.ndarray`: The resized image.
|
165 |
+
"""
|
166 |
+
size = get_size_dict(size)
|
167 |
+
if "height" not in size or "width" not in size:
|
168 |
+
raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
|
169 |
+
output_size = (size["height"], size["width"])
|
170 |
+
return resize(
|
171 |
+
image,
|
172 |
+
size=output_size,
|
173 |
+
resample=resample,
|
174 |
+
data_format=data_format,
|
175 |
+
input_data_format=input_data_format,
|
176 |
+
**kwargs,
|
177 |
+
)
|
178 |
+
|
179 |
+
def preprocess(
|
180 |
+
self,
|
181 |
+
images: ImageInput,
|
182 |
+
do_resize: bool = None,
|
183 |
+
size: Dict[str, int] = None,
|
184 |
+
resample=None,
|
185 |
+
do_center_crop: bool = None,
|
186 |
+
crop_size: Dict[str, int] = None,
|
187 |
+
do_rescale: bool = None,
|
188 |
+
rescale_factor: float = None,
|
189 |
+
do_normalize: bool = None,
|
190 |
+
image_mean: Optional[Union[float, List[float]]] = None,
|
191 |
+
image_std: Optional[Union[float, List[float]]] = None,
|
192 |
+
return_tensors: Optional[Union[str, TensorType]] = None,
|
193 |
+
data_format: ChannelDimension = ChannelDimension.FIRST,
|
194 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
195 |
+
**kwargs,
|
196 |
+
) -> PIL.Image.Image:
|
197 |
+
"""
|
198 |
+
Preprocess an image or batch of images.
|
199 |
+
|
200 |
+
Args:
|
201 |
+
images (`ImageInput`):
|
202 |
+
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
|
203 |
+
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
|
204 |
+
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
|
205 |
+
Whether to resize the image.
|
206 |
+
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
|
207 |
+
Size of the image after `resize`.
|
208 |
+
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
|
209 |
+
PILImageResampling filter to use if resizing the image Only has an effect if `do_resize` is set to
|
210 |
+
`True`.
|
211 |
+
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
|
212 |
+
Whether to center crop the image.
|
213 |
+
crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
|
214 |
+
Size of the image after center crop. If one edge the image is smaller than `crop_size`, it will be
|
215 |
+
padded with zeros and then cropped
|
216 |
+
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
|
217 |
+
Whether to rescale the image values between [0 - 1].
|
218 |
+
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
|
219 |
+
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
|
220 |
+
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
|
221 |
+
Whether to normalize the image.
|
222 |
+
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
|
223 |
+
Image mean.
|
224 |
+
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
|
225 |
+
Image standard deviation.
|
226 |
+
return_tensors (`str` or `TensorType`, *optional*):
|
227 |
+
The type of tensors to return. Can be one of:
|
228 |
+
- `None`: Return a list of `np.ndarray`.
|
229 |
+
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
|
230 |
+
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
|
231 |
+
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
|
232 |
+
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
|
233 |
+
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
|
234 |
+
The channel dimension format for the output image. Can be one of:
|
235 |
+
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
236 |
+
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
237 |
+
input_data_format (`ChannelDimension` or `str`, *optional*):
|
238 |
+
The channel dimension format for the input image. If unset, the channel dimension format is inferred
|
239 |
+
from the input image. Can be one of:
|
240 |
+
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
241 |
+
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
242 |
+
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
|
243 |
+
"""
|
244 |
+
do_resize = do_resize if do_resize is not None else self.do_resize
|
245 |
+
resample = resample if resample is not None else self.resample
|
246 |
+
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
|
247 |
+
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
|
248 |
+
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
|
249 |
+
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
|
250 |
+
image_mean = image_mean if image_mean is not None else self.image_mean
|
251 |
+
image_std = image_std if image_std is not None else self.image_std
|
252 |
+
|
253 |
+
size = size if size is not None else self.size
|
254 |
+
size = get_size_dict(size)
|
255 |
+
crop_size = crop_size if crop_size is not None else self.crop_size
|
256 |
+
crop_size = get_size_dict(crop_size, param_name="crop_size")
|
257 |
+
|
258 |
+
images = make_list_of_images(images)
|
259 |
+
|
260 |
+
validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
|
261 |
+
|
262 |
+
if not valid_images(images):
|
263 |
+
raise ValueError(
|
264 |
+
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
|
265 |
+
"torch.Tensor, tf.Tensor or jax.ndarray."
|
266 |
+
)
|
267 |
+
validate_preprocess_arguments(
|
268 |
+
do_rescale=do_rescale,
|
269 |
+
rescale_factor=rescale_factor,
|
270 |
+
do_normalize=do_normalize,
|
271 |
+
image_mean=image_mean,
|
272 |
+
image_std=image_std,
|
273 |
+
do_center_crop=do_center_crop,
|
274 |
+
crop_size=crop_size,
|
275 |
+
do_resize=do_resize,
|
276 |
+
size=size,
|
277 |
+
resample=resample,
|
278 |
+
)
|
279 |
+
# All transformations expect numpy arrays.
|
280 |
+
images = [to_numpy_array(image) for image in images]
|
281 |
+
|
282 |
+
if is_scaled_image(images[0]) and do_rescale:
|
283 |
+
logger.warning_once(
|
284 |
+
"It looks like you are trying to rescale already rescaled images. If the input"
|
285 |
+
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
|
286 |
+
)
|
287 |
+
|
288 |
+
if input_data_format is None:
|
289 |
+
# We assume that all images have the same channel dimension format.
|
290 |
+
input_data_format = infer_channel_dimension_format(images[0])
|
291 |
+
|
292 |
+
if do_resize:
|
293 |
+
images = [
|
294 |
+
self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
|
295 |
+
for image in images
|
296 |
+
]
|
297 |
+
|
298 |
+
if do_center_crop:
|
299 |
+
images = [
|
300 |
+
self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
|
301 |
+
]
|
302 |
+
|
303 |
+
if do_rescale:
|
304 |
+
images = [
|
305 |
+
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
|
306 |
+
for image in images
|
307 |
+
]
|
308 |
+
|
309 |
+
if do_normalize:
|
310 |
+
images = [
|
311 |
+
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
|
312 |
+
for image in images
|
313 |
+
]
|
314 |
+
|
315 |
+
images = [
|
316 |
+
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
|
317 |
+
]
|
318 |
+
|
319 |
+
data = {"pixel_values": images}
|
320 |
+
return BatchFeature(data=data, tensor_type=return_tensors)
|
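A minimal usage sketch of the `preprocess` pipeline above, driven through the processor's `__call__`. The checkpoint name and the COCO image URL are assumptions borrowed from the model docstrings later in this diff; any DeiT checkpoint with a saved preprocessor config would work the same way.

```python
import requests
from PIL import Image
from transformers import AutoImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Illustrative checkpoint; __call__ dispatches to the preprocess() method shown above:
# resize -> center crop -> rescale -> normalize -> channels-first layout.
image_processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
inputs = image_processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 224, 224])
```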
llmeval-env/lib/python3.10/site-packages/transformers/models/deit/modeling_deit.py
ADDED
@@ -0,0 +1,891 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2021 Facebook AI Research (FAIR), Ross Wightman, The HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
""" PyTorch DeiT model."""
|
16 |
+
|
17 |
+
|
18 |
+
import collections.abc
|
19 |
+
import math
|
20 |
+
from dataclasses import dataclass
|
21 |
+
from typing import Optional, Set, Tuple, Union
|
22 |
+
|
23 |
+
import torch
|
24 |
+
import torch.utils.checkpoint
|
25 |
+
from torch import nn
|
26 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
27 |
+
|
28 |
+
from ...activations import ACT2FN
|
29 |
+
from ...modeling_outputs import (
|
30 |
+
BaseModelOutput,
|
31 |
+
BaseModelOutputWithPooling,
|
32 |
+
ImageClassifierOutput,
|
33 |
+
MaskedImageModelingOutput,
|
34 |
+
)
|
35 |
+
from ...modeling_utils import PreTrainedModel
|
36 |
+
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
|
37 |
+
from ...utils import (
|
38 |
+
ModelOutput,
|
39 |
+
add_code_sample_docstrings,
|
40 |
+
add_start_docstrings,
|
41 |
+
add_start_docstrings_to_model_forward,
|
42 |
+
logging,
|
43 |
+
replace_return_docstrings,
|
44 |
+
)
|
45 |
+
from .configuration_deit import DeiTConfig
|
46 |
+
|
47 |
+
|
48 |
+
logger = logging.get_logger(__name__)
|
49 |
+
|
50 |
+
# General docstring
|
51 |
+
_CONFIG_FOR_DOC = "DeiTConfig"
|
52 |
+
|
53 |
+
# Base docstring
|
54 |
+
_CHECKPOINT_FOR_DOC = "facebook/deit-base-distilled-patch16-224"
|
55 |
+
_EXPECTED_OUTPUT_SHAPE = [1, 198, 768]
|
56 |
+
|
57 |
+
# Image classification docstring
|
58 |
+
_IMAGE_CLASS_CHECKPOINT = "facebook/deit-base-distilled-patch16-224"
|
59 |
+
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
|
60 |
+
|
61 |
+
|
62 |
+
from ..deprecated._archive_maps import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
|
63 |
+
|
64 |
+
|
65 |
+
class DeiTEmbeddings(nn.Module):
|
66 |
+
"""
|
67 |
+
Construct the CLS token, distillation token, position and patch embeddings. Optionally, also the mask token.
|
68 |
+
"""
|
69 |
+
|
70 |
+
def __init__(self, config: DeiTConfig, use_mask_token: bool = False) -> None:
|
71 |
+
super().__init__()
|
72 |
+
|
73 |
+
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
|
74 |
+
self.distillation_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
|
75 |
+
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None
|
76 |
+
self.patch_embeddings = DeiTPatchEmbeddings(config)
|
77 |
+
num_patches = self.patch_embeddings.num_patches
|
78 |
+
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 2, config.hidden_size))
|
79 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
80 |
+
|
81 |
+
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None) -> torch.Tensor:
|
82 |
+
embeddings = self.patch_embeddings(pixel_values)
|
83 |
+
batch_size, seq_length, _ = embeddings.size()
|
84 |
+
|
85 |
+
if bool_masked_pos is not None:
|
86 |
+
mask_tokens = self.mask_token.expand(batch_size, seq_length, -1)
|
87 |
+
# replace the masked visual tokens by mask_tokens
|
88 |
+
mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
|
89 |
+
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
|
90 |
+
|
91 |
+
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
|
92 |
+
distillation_tokens = self.distillation_token.expand(batch_size, -1, -1)
|
93 |
+
embeddings = torch.cat((cls_tokens, distillation_tokens, embeddings), dim=1)
|
94 |
+
embeddings = embeddings + self.position_embeddings
|
95 |
+
embeddings = self.dropout(embeddings)
|
96 |
+
return embeddings
|
97 |
+
|
98 |
+
|
99 |
+
class DeiTPatchEmbeddings(nn.Module):
|
100 |
+
"""
|
101 |
+
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
|
102 |
+
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
|
103 |
+
Transformer.
|
104 |
+
"""
|
105 |
+
|
106 |
+
def __init__(self, config):
|
107 |
+
super().__init__()
|
108 |
+
image_size, patch_size = config.image_size, config.patch_size
|
109 |
+
num_channels, hidden_size = config.num_channels, config.hidden_size
|
110 |
+
|
111 |
+
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
|
112 |
+
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
|
113 |
+
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
|
114 |
+
self.image_size = image_size
|
115 |
+
self.patch_size = patch_size
|
116 |
+
self.num_channels = num_channels
|
117 |
+
self.num_patches = num_patches
|
118 |
+
|
119 |
+
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
|
120 |
+
|
121 |
+
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
|
122 |
+
batch_size, num_channels, height, width = pixel_values.shape
|
123 |
+
if num_channels != self.num_channels:
|
124 |
+
raise ValueError(
|
125 |
+
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
|
126 |
+
)
|
127 |
+
if height != self.image_size[0] or width != self.image_size[1]:
|
128 |
+
raise ValueError(
|
129 |
+
f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
|
130 |
+
)
|
131 |
+
x = self.projection(pixel_values).flatten(2).transpose(1, 2)
|
132 |
+
return x
|
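For orientation, a quick back-of-the-envelope check of the sequence length these embeddings produce, assuming the default DeiT configuration (224x224 images, 16x16 patches, hidden size 768):

```python
# Sanity check of the token count under the default DeiT config (an assumption, not read from a checkpoint).
image_size, patch_size, hidden_size = 224, 16, 768
num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196 patch tokens
seq_length = num_patches + 2                   # plus the [CLS] token and the distillation token
print([1, seq_length, hidden_size])            # [1, 198, 768], matching _EXPECTED_OUTPUT_SHAPE above
```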
133 |
+
|
134 |
+
|
135 |
+
# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->DeiT
|
136 |
+
class DeiTSelfAttention(nn.Module):
|
137 |
+
def __init__(self, config: DeiTConfig) -> None:
|
138 |
+
super().__init__()
|
139 |
+
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
|
140 |
+
raise ValueError(
|
141 |
+
f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
|
142 |
+
f"heads {config.num_attention_heads}."
|
143 |
+
)
|
144 |
+
|
145 |
+
self.num_attention_heads = config.num_attention_heads
|
146 |
+
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
|
147 |
+
self.all_head_size = self.num_attention_heads * self.attention_head_size
|
148 |
+
|
149 |
+
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
|
150 |
+
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
|
151 |
+
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
|
152 |
+
|
153 |
+
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
|
154 |
+
|
155 |
+
def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
|
156 |
+
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
|
157 |
+
x = x.view(new_x_shape)
|
158 |
+
return x.permute(0, 2, 1, 3)
|
159 |
+
|
160 |
+
def forward(
|
161 |
+
self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
|
162 |
+
) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
|
163 |
+
mixed_query_layer = self.query(hidden_states)
|
164 |
+
|
165 |
+
key_layer = self.transpose_for_scores(self.key(hidden_states))
|
166 |
+
value_layer = self.transpose_for_scores(self.value(hidden_states))
|
167 |
+
query_layer = self.transpose_for_scores(mixed_query_layer)
|
168 |
+
|
169 |
+
# Take the dot product between "query" and "key" to get the raw attention scores.
|
170 |
+
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
|
171 |
+
|
172 |
+
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
|
173 |
+
|
174 |
+
# Normalize the attention scores to probabilities.
|
175 |
+
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
|
176 |
+
|
177 |
+
# This is actually dropping out entire tokens to attend to, which might
|
178 |
+
# seem a bit unusual, but is taken from the original Transformer paper.
|
179 |
+
attention_probs = self.dropout(attention_probs)
|
180 |
+
|
181 |
+
# Mask heads if we want to
|
182 |
+
if head_mask is not None:
|
183 |
+
attention_probs = attention_probs * head_mask
|
184 |
+
|
185 |
+
context_layer = torch.matmul(attention_probs, value_layer)
|
186 |
+
|
187 |
+
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
|
188 |
+
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
|
189 |
+
context_layer = context_layer.view(new_context_layer_shape)
|
190 |
+
|
191 |
+
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
|
192 |
+
|
193 |
+
return outputs
|
194 |
+
|
195 |
+
|
196 |
+
# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->DeiT
|
197 |
+
class DeiTSelfOutput(nn.Module):
|
198 |
+
"""
|
199 |
+
The residual connection is defined in DeiTLayer instead of here (as is the case with other models), due to the
|
200 |
+
layernorm applied before each block.
|
201 |
+
"""
|
202 |
+
|
203 |
+
def __init__(self, config: DeiTConfig) -> None:
|
204 |
+
super().__init__()
|
205 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
206 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
207 |
+
|
208 |
+
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
|
209 |
+
hidden_states = self.dense(hidden_states)
|
210 |
+
hidden_states = self.dropout(hidden_states)
|
211 |
+
|
212 |
+
return hidden_states
|
213 |
+
|
214 |
+
|
215 |
+
# Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->DeiT
|
216 |
+
class DeiTAttention(nn.Module):
|
217 |
+
def __init__(self, config: DeiTConfig) -> None:
|
218 |
+
super().__init__()
|
219 |
+
self.attention = DeiTSelfAttention(config)
|
220 |
+
self.output = DeiTSelfOutput(config)
|
221 |
+
self.pruned_heads = set()
|
222 |
+
|
223 |
+
def prune_heads(self, heads: Set[int]) -> None:
|
224 |
+
if len(heads) == 0:
|
225 |
+
return
|
226 |
+
heads, index = find_pruneable_heads_and_indices(
|
227 |
+
heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
|
228 |
+
)
|
229 |
+
|
230 |
+
# Prune linear layers
|
231 |
+
self.attention.query = prune_linear_layer(self.attention.query, index)
|
232 |
+
self.attention.key = prune_linear_layer(self.attention.key, index)
|
233 |
+
self.attention.value = prune_linear_layer(self.attention.value, index)
|
234 |
+
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
|
235 |
+
|
236 |
+
# Update hyper params and store pruned heads
|
237 |
+
self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
|
238 |
+
self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
|
239 |
+
self.pruned_heads = self.pruned_heads.union(heads)
|
240 |
+
|
241 |
+
def forward(
|
242 |
+
self,
|
243 |
+
hidden_states: torch.Tensor,
|
244 |
+
head_mask: Optional[torch.Tensor] = None,
|
245 |
+
output_attentions: bool = False,
|
246 |
+
) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
|
247 |
+
self_outputs = self.attention(hidden_states, head_mask, output_attentions)
|
248 |
+
|
249 |
+
attention_output = self.output(self_outputs[0], hidden_states)
|
250 |
+
|
251 |
+
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
|
252 |
+
return outputs
|
253 |
+
|
254 |
+
|
255 |
+
# Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->DeiT
|
256 |
+
class DeiTIntermediate(nn.Module):
|
257 |
+
def __init__(self, config: DeiTConfig) -> None:
|
258 |
+
super().__init__()
|
259 |
+
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
|
260 |
+
if isinstance(config.hidden_act, str):
|
261 |
+
self.intermediate_act_fn = ACT2FN[config.hidden_act]
|
262 |
+
else:
|
263 |
+
self.intermediate_act_fn = config.hidden_act
|
264 |
+
|
265 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
266 |
+
hidden_states = self.dense(hidden_states)
|
267 |
+
hidden_states = self.intermediate_act_fn(hidden_states)
|
268 |
+
|
269 |
+
return hidden_states
|
270 |
+
|
271 |
+
|
272 |
+
# Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->DeiT
|
273 |
+
class DeiTOutput(nn.Module):
|
274 |
+
def __init__(self, config: DeiTConfig) -> None:
|
275 |
+
super().__init__()
|
276 |
+
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
|
277 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
278 |
+
|
279 |
+
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
|
280 |
+
hidden_states = self.dense(hidden_states)
|
281 |
+
hidden_states = self.dropout(hidden_states)
|
282 |
+
|
283 |
+
hidden_states = hidden_states + input_tensor
|
284 |
+
|
285 |
+
return hidden_states
|
286 |
+
|
287 |
+
|
288 |
+
# Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->DeiT
|
289 |
+
class DeiTLayer(nn.Module):
|
290 |
+
"""This corresponds to the Block class in the timm implementation."""
|
291 |
+
|
292 |
+
def __init__(self, config: DeiTConfig) -> None:
|
293 |
+
super().__init__()
|
294 |
+
self.chunk_size_feed_forward = config.chunk_size_feed_forward
|
295 |
+
self.seq_len_dim = 1
|
296 |
+
self.attention = DeiTAttention(config)
|
297 |
+
self.intermediate = DeiTIntermediate(config)
|
298 |
+
self.output = DeiTOutput(config)
|
299 |
+
self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
300 |
+
self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
301 |
+
|
302 |
+
def forward(
|
303 |
+
self,
|
304 |
+
hidden_states: torch.Tensor,
|
305 |
+
head_mask: Optional[torch.Tensor] = None,
|
306 |
+
output_attentions: bool = False,
|
307 |
+
) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
|
308 |
+
self_attention_outputs = self.attention(
|
309 |
+
self.layernorm_before(hidden_states), # in DeiT, layernorm is applied before self-attention
|
310 |
+
head_mask,
|
311 |
+
output_attentions=output_attentions,
|
312 |
+
)
|
313 |
+
attention_output = self_attention_outputs[0]
|
314 |
+
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
|
315 |
+
|
316 |
+
# first residual connection
|
317 |
+
hidden_states = attention_output + hidden_states
|
318 |
+
|
319 |
+
# in DeiT, layernorm is also applied after self-attention
|
320 |
+
layer_output = self.layernorm_after(hidden_states)
|
321 |
+
layer_output = self.intermediate(layer_output)
|
322 |
+
|
323 |
+
# second residual connection is done here
|
324 |
+
layer_output = self.output(layer_output, hidden_states)
|
325 |
+
|
326 |
+
outputs = (layer_output,) + outputs
|
327 |
+
|
328 |
+
return outputs
|
329 |
+
|
330 |
+
|
331 |
+
# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->DeiT
|
332 |
+
class DeiTEncoder(nn.Module):
|
333 |
+
def __init__(self, config: DeiTConfig) -> None:
|
334 |
+
super().__init__()
|
335 |
+
self.config = config
|
336 |
+
self.layer = nn.ModuleList([DeiTLayer(config) for _ in range(config.num_hidden_layers)])
|
337 |
+
self.gradient_checkpointing = False
|
338 |
+
|
339 |
+
def forward(
|
340 |
+
self,
|
341 |
+
hidden_states: torch.Tensor,
|
342 |
+
head_mask: Optional[torch.Tensor] = None,
|
343 |
+
output_attentions: bool = False,
|
344 |
+
output_hidden_states: bool = False,
|
345 |
+
return_dict: bool = True,
|
346 |
+
) -> Union[tuple, BaseModelOutput]:
|
347 |
+
all_hidden_states = () if output_hidden_states else None
|
348 |
+
all_self_attentions = () if output_attentions else None
|
349 |
+
|
350 |
+
for i, layer_module in enumerate(self.layer):
|
351 |
+
if output_hidden_states:
|
352 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
353 |
+
|
354 |
+
layer_head_mask = head_mask[i] if head_mask is not None else None
|
355 |
+
|
356 |
+
if self.gradient_checkpointing and self.training:
|
357 |
+
layer_outputs = self._gradient_checkpointing_func(
|
358 |
+
layer_module.__call__,
|
359 |
+
hidden_states,
|
360 |
+
layer_head_mask,
|
361 |
+
output_attentions,
|
362 |
+
)
|
363 |
+
else:
|
364 |
+
layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)
|
365 |
+
|
366 |
+
hidden_states = layer_outputs[0]
|
367 |
+
|
368 |
+
if output_attentions:
|
369 |
+
all_self_attentions = all_self_attentions + (layer_outputs[1],)
|
370 |
+
|
371 |
+
if output_hidden_states:
|
372 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
373 |
+
|
374 |
+
if not return_dict:
|
375 |
+
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
|
376 |
+
return BaseModelOutput(
|
377 |
+
last_hidden_state=hidden_states,
|
378 |
+
hidden_states=all_hidden_states,
|
379 |
+
attentions=all_self_attentions,
|
380 |
+
)
|
381 |
+
|
382 |
+
|
383 |
+
class DeiTPreTrainedModel(PreTrainedModel):
|
384 |
+
"""
|
385 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
386 |
+
models.
|
387 |
+
"""
|
388 |
+
|
389 |
+
config_class = DeiTConfig
|
390 |
+
base_model_prefix = "deit"
|
391 |
+
main_input_name = "pixel_values"
|
392 |
+
supports_gradient_checkpointing = True
|
393 |
+
_no_split_modules = ["DeiTLayer"]
|
394 |
+
|
395 |
+
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
|
396 |
+
"""Initialize the weights"""
|
397 |
+
if isinstance(module, (nn.Linear, nn.Conv2d)):
|
398 |
+
# Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
|
399 |
+
# `trunc_normal_cpu` not implemented in `half` issues
|
400 |
+
module.weight.data = nn.init.trunc_normal_(
|
401 |
+
module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
|
402 |
+
).to(module.weight.dtype)
|
403 |
+
if module.bias is not None:
|
404 |
+
module.bias.data.zero_()
|
405 |
+
elif isinstance(module, nn.LayerNorm):
|
406 |
+
module.bias.data.zero_()
|
407 |
+
module.weight.data.fill_(1.0)
|
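Since `supports_gradient_checkpointing = True`, activation checkpointing can be toggled through the generic `PreTrainedModel` API rather than by touching `DeiTEncoder` directly. A short sketch; the checkpoint name is only an example:

```python
from transformers import DeiTModel

model = DeiTModel.from_pretrained("facebook/deit-base-distilled-patch16-224")
model.gradient_checkpointing_enable()   # sets DeiTEncoder.gradient_checkpointing = True for training
model.gradient_checkpointing_disable()  # and turns it back off
```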
408 |
+
|
409 |
+
|
410 |
+
DEIT_START_DOCSTRING = r"""
|
411 |
+
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
|
412 |
+
as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
|
413 |
+
behavior.
|
414 |
+
|
415 |
+
Parameters:
|
416 |
+
config ([`DeiTConfig`]): Model configuration class with all the parameters of the model.
|
417 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
418 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
419 |
+
"""
|
420 |
+
|
421 |
+
DEIT_INPUTS_DOCSTRING = r"""
|
422 |
+
Args:
|
423 |
+
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
|
424 |
+
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
|
425 |
+
[`DeiTImageProcessor.__call__`] for details.
|
426 |
+
|
427 |
+
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
|
428 |
+
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
|
429 |
+
|
430 |
+
- 1 indicates the head is **not masked**,
|
431 |
+
- 0 indicates the head is **masked**.
|
432 |
+
|
433 |
+
output_attentions (`bool`, *optional*):
|
434 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
435 |
+
tensors for more detail.
|
436 |
+
output_hidden_states (`bool`, *optional*):
|
437 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
438 |
+
more detail.
|
439 |
+
return_dict (`bool`, *optional*):
|
440 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
441 |
+
"""
|
442 |
+
|
443 |
+
|
444 |
+
@add_start_docstrings(
|
445 |
+
"The bare DeiT Model transformer outputting raw hidden-states without any specific head on top.",
|
446 |
+
DEIT_START_DOCSTRING,
|
447 |
+
)
|
448 |
+
class DeiTModel(DeiTPreTrainedModel):
|
449 |
+
def __init__(self, config: DeiTConfig, add_pooling_layer: bool = True, use_mask_token: bool = False) -> None:
|
450 |
+
super().__init__(config)
|
451 |
+
self.config = config
|
452 |
+
|
453 |
+
self.embeddings = DeiTEmbeddings(config, use_mask_token=use_mask_token)
|
454 |
+
self.encoder = DeiTEncoder(config)
|
455 |
+
|
456 |
+
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
457 |
+
self.pooler = DeiTPooler(config) if add_pooling_layer else None
|
458 |
+
|
459 |
+
# Initialize weights and apply final processing
|
460 |
+
self.post_init()
|
461 |
+
|
462 |
+
def get_input_embeddings(self) -> DeiTPatchEmbeddings:
|
463 |
+
return self.embeddings.patch_embeddings
|
464 |
+
|
465 |
+
def _prune_heads(self, heads_to_prune):
|
466 |
+
"""
|
467 |
+
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
|
468 |
+
class PreTrainedModel
|
469 |
+
"""
|
470 |
+
for layer, heads in heads_to_prune.items():
|
471 |
+
self.encoder.layer[layer].attention.prune_heads(heads)
|
472 |
+
|
473 |
+
@add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
|
474 |
+
@add_code_sample_docstrings(
|
475 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
476 |
+
output_type=BaseModelOutputWithPooling,
|
477 |
+
config_class=_CONFIG_FOR_DOC,
|
478 |
+
modality="vision",
|
479 |
+
expected_output=_EXPECTED_OUTPUT_SHAPE,
|
480 |
+
)
|
481 |
+
def forward(
|
482 |
+
self,
|
483 |
+
pixel_values: Optional[torch.Tensor] = None,
|
484 |
+
bool_masked_pos: Optional[torch.BoolTensor] = None,
|
485 |
+
head_mask: Optional[torch.Tensor] = None,
|
486 |
+
output_attentions: Optional[bool] = None,
|
487 |
+
output_hidden_states: Optional[bool] = None,
|
488 |
+
return_dict: Optional[bool] = None,
|
489 |
+
) -> Union[Tuple, BaseModelOutputWithPooling]:
|
490 |
+
r"""
|
491 |
+
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
|
492 |
+
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
|
493 |
+
"""
|
494 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
495 |
+
output_hidden_states = (
|
496 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
497 |
+
)
|
498 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
499 |
+
|
500 |
+
if pixel_values is None:
|
501 |
+
raise ValueError("You have to specify pixel_values")
|
502 |
+
|
503 |
+
# Prepare head mask if needed
|
504 |
+
# 1.0 in head_mask indicates we keep the head
|
505 |
+
# attention_probs has shape bsz x n_heads x N x N
|
506 |
+
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
|
507 |
+
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
|
508 |
+
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
509 |
+
|
510 |
+
# TODO: maybe have a cleaner way to cast the input (from `ImageProcessor` side?)
|
511 |
+
expected_dtype = self.embeddings.patch_embeddings.projection.weight.dtype
|
512 |
+
if pixel_values.dtype != expected_dtype:
|
513 |
+
pixel_values = pixel_values.to(expected_dtype)
|
514 |
+
|
515 |
+
embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
|
516 |
+
|
517 |
+
encoder_outputs = self.encoder(
|
518 |
+
embedding_output,
|
519 |
+
head_mask=head_mask,
|
520 |
+
output_attentions=output_attentions,
|
521 |
+
output_hidden_states=output_hidden_states,
|
522 |
+
return_dict=return_dict,
|
523 |
+
)
|
524 |
+
sequence_output = encoder_outputs[0]
|
525 |
+
sequence_output = self.layernorm(sequence_output)
|
526 |
+
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
|
527 |
+
|
528 |
+
if not return_dict:
|
529 |
+
head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
|
530 |
+
return head_outputs + encoder_outputs[1:]
|
531 |
+
|
532 |
+
return BaseModelOutputWithPooling(
|
533 |
+
last_hidden_state=sequence_output,
|
534 |
+
pooler_output=pooled_output,
|
535 |
+
hidden_states=encoder_outputs.hidden_states,
|
536 |
+
attentions=encoder_outputs.attentions,
|
537 |
+
)
|
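A minimal inference sketch for `DeiTModel`, mirroring the docstring examples further down in this file; the imports, image URL, and checkpoint name are assumptions taken from those examples:

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, DeiTModel

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = DeiTModel.from_pretrained("facebook/deit-base-distilled-patch16-224")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 198, 768])
```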
538 |
+
|
539 |
+
|
540 |
+
# Copied from transformers.models.vit.modeling_vit.ViTPooler with ViT->DeiT
|
541 |
+
class DeiTPooler(nn.Module):
|
542 |
+
def __init__(self, config: DeiTConfig):
|
543 |
+
super().__init__()
|
544 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
545 |
+
self.activation = nn.Tanh()
|
546 |
+
|
547 |
+
def forward(self, hidden_states):
|
548 |
+
# We "pool" the model by simply taking the hidden state corresponding
|
549 |
+
# to the first token.
|
550 |
+
first_token_tensor = hidden_states[:, 0]
|
551 |
+
pooled_output = self.dense(first_token_tensor)
|
552 |
+
pooled_output = self.activation(pooled_output)
|
553 |
+
return pooled_output
|
554 |
+
|
555 |
+
|
556 |
+
@add_start_docstrings(
|
557 |
+
"""DeiT Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://arxiv.org/abs/2111.09886).
|
558 |
+
|
559 |
+
<Tip>
|
560 |
+
|
561 |
+
Note that we provide a script to pre-train this model on custom data in our [examples
|
562 |
+
directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).
|
563 |
+
|
564 |
+
</Tip>
|
565 |
+
""",
|
566 |
+
DEIT_START_DOCSTRING,
|
567 |
+
)
|
568 |
+
class DeiTForMaskedImageModeling(DeiTPreTrainedModel):
|
569 |
+
def __init__(self, config: DeiTConfig) -> None:
|
570 |
+
super().__init__(config)
|
571 |
+
|
572 |
+
self.deit = DeiTModel(config, add_pooling_layer=False, use_mask_token=True)
|
573 |
+
|
574 |
+
self.decoder = nn.Sequential(
|
575 |
+
nn.Conv2d(
|
576 |
+
in_channels=config.hidden_size,
|
577 |
+
out_channels=config.encoder_stride**2 * config.num_channels,
|
578 |
+
kernel_size=1,
|
579 |
+
),
|
580 |
+
nn.PixelShuffle(config.encoder_stride),
|
581 |
+
)
|
582 |
+
|
583 |
+
# Initialize weights and apply final processing
|
584 |
+
self.post_init()
|
585 |
+
|
586 |
+
@add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
|
587 |
+
@replace_return_docstrings(output_type=MaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC)
|
588 |
+
def forward(
|
589 |
+
self,
|
590 |
+
pixel_values: Optional[torch.Tensor] = None,
|
591 |
+
bool_masked_pos: Optional[torch.BoolTensor] = None,
|
592 |
+
head_mask: Optional[torch.Tensor] = None,
|
593 |
+
output_attentions: Optional[bool] = None,
|
594 |
+
output_hidden_states: Optional[bool] = None,
|
595 |
+
return_dict: Optional[bool] = None,
|
596 |
+
) -> Union[tuple, MaskedImageModelingOutput]:
|
597 |
+
r"""
|
598 |
+
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
|
599 |
+
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
|
600 |
+
|
601 |
+
Returns:
|
602 |
+
|
603 |
+
Examples:
|
604 |
+
```python
|
605 |
+
>>> from transformers import AutoImageProcessor, DeiTForMaskedImageModeling
|
606 |
+
>>> import torch
|
607 |
+
>>> from PIL import Image
|
608 |
+
>>> import requests
|
609 |
+
|
610 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
611 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
612 |
+
|
613 |
+
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
|
614 |
+
>>> model = DeiTForMaskedImageModeling.from_pretrained("facebook/deit-base-distilled-patch16-224")
|
615 |
+
|
616 |
+
>>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
|
617 |
+
>>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
|
618 |
+
>>> # create random boolean mask of shape (batch_size, num_patches)
|
619 |
+
>>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()
|
620 |
+
|
621 |
+
>>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
|
622 |
+
>>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
|
623 |
+
>>> list(reconstructed_pixel_values.shape)
|
624 |
+
[1, 3, 224, 224]
|
625 |
+
```"""
|
626 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
627 |
+
|
628 |
+
outputs = self.deit(
|
629 |
+
pixel_values,
|
630 |
+
bool_masked_pos=bool_masked_pos,
|
631 |
+
head_mask=head_mask,
|
632 |
+
output_attentions=output_attentions,
|
633 |
+
output_hidden_states=output_hidden_states,
|
634 |
+
return_dict=return_dict,
|
635 |
+
)
|
636 |
+
|
637 |
+
sequence_output = outputs[0]
|
638 |
+
|
639 |
+
# Reshape to (batch_size, num_channels, height, width)
|
640 |
+
sequence_output = sequence_output[:, 1:-1]
|
641 |
+
batch_size, sequence_length, num_channels = sequence_output.shape
|
642 |
+
height = width = int(sequence_length**0.5)
|
643 |
+
sequence_output = sequence_output.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)
|
644 |
+
|
645 |
+
# Reconstruct pixel values
|
646 |
+
reconstructed_pixel_values = self.decoder(sequence_output)
|
647 |
+
|
648 |
+
masked_im_loss = None
|
649 |
+
if bool_masked_pos is not None:
|
650 |
+
size = self.config.image_size // self.config.patch_size
|
651 |
+
bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
|
652 |
+
mask = (
|
653 |
+
bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
|
654 |
+
.repeat_interleave(self.config.patch_size, 2)
|
655 |
+
.unsqueeze(1)
|
656 |
+
.contiguous()
|
657 |
+
)
|
658 |
+
reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
|
659 |
+
masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels
|
660 |
+
|
661 |
+
if not return_dict:
|
662 |
+
output = (reconstructed_pixel_values,) + outputs[1:]
|
663 |
+
return ((masked_im_loss,) + output) if masked_im_loss is not None else output
|
664 |
+
|
665 |
+
return MaskedImageModelingOutput(
|
666 |
+
loss=masked_im_loss,
|
667 |
+
reconstruction=reconstructed_pixel_values,
|
668 |
+
hidden_states=outputs.hidden_states,
|
669 |
+
attentions=outputs.attentions,
|
670 |
+
)
|
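A small standalone sketch of the mask up-sampling used in the L1 reconstruction loss above, assuming the default 224/16 geometry (the tensor shapes are the only claim being illustrated):

```python
import torch

# Patch-level boolean mask, expanded to pixel resolution exactly as in the forward pass above.
patch_size, num_patches_per_side = 16, 224 // 16
bool_masked_pos = torch.randint(0, 2, (1, num_patches_per_side**2)).bool()
mask = (
    bool_masked_pos.reshape(-1, num_patches_per_side, num_patches_per_side)
    .repeat_interleave(patch_size, 1)
    .repeat_interleave(patch_size, 2)
    .unsqueeze(1)
)
print(mask.shape)  # torch.Size([1, 1, 224, 224]) -> broadcasts against (1, 3, 224, 224) pixel values
```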
671 |
+
|
672 |
+
|
673 |
+
@add_start_docstrings(
|
674 |
+
"""
|
675 |
+
DeiT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
|
676 |
+
the [CLS] token) e.g. for ImageNet.
|
677 |
+
""",
|
678 |
+
DEIT_START_DOCSTRING,
|
679 |
+
)
|
680 |
+
class DeiTForImageClassification(DeiTPreTrainedModel):
|
681 |
+
def __init__(self, config: DeiTConfig) -> None:
|
682 |
+
super().__init__(config)
|
683 |
+
|
684 |
+
self.num_labels = config.num_labels
|
685 |
+
self.deit = DeiTModel(config, add_pooling_layer=False)
|
686 |
+
|
687 |
+
# Classifier head
|
688 |
+
self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
|
689 |
+
|
690 |
+
# Initialize weights and apply final processing
|
691 |
+
self.post_init()
|
692 |
+
|
693 |
+
@add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
|
694 |
+
@replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC)
|
695 |
+
def forward(
|
696 |
+
self,
|
697 |
+
pixel_values: Optional[torch.Tensor] = None,
|
698 |
+
head_mask: Optional[torch.Tensor] = None,
|
699 |
+
labels: Optional[torch.Tensor] = None,
|
700 |
+
output_attentions: Optional[bool] = None,
|
701 |
+
output_hidden_states: Optional[bool] = None,
|
702 |
+
return_dict: Optional[bool] = None,
|
703 |
+
) -> Union[tuple, ImageClassifierOutput]:
|
704 |
+
r"""
|
705 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
706 |
+
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
|
707 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
708 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
709 |
+
|
710 |
+
Returns:
|
711 |
+
|
712 |
+
Examples:
|
713 |
+
|
714 |
+
```python
|
715 |
+
>>> from transformers import AutoImageProcessor, DeiTForImageClassification
|
716 |
+
>>> import torch
|
717 |
+
>>> from PIL import Image
|
718 |
+
>>> import requests
|
719 |
+
|
720 |
+
>>> torch.manual_seed(3) # doctest: +IGNORE_RESULT
|
721 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
722 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
723 |
+
|
724 |
+
>>> # note: we are loading a DeiTForImageClassificationWithTeacher from the hub here,
|
725 |
+
>>> # so the head will be randomly initialized, hence the predictions will be random
|
726 |
+
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
|
727 |
+
>>> model = DeiTForImageClassification.from_pretrained("facebook/deit-base-distilled-patch16-224")
|
728 |
+
|
729 |
+
>>> inputs = image_processor(images=image, return_tensors="pt")
|
730 |
+
>>> outputs = model(**inputs)
|
731 |
+
>>> logits = outputs.logits
|
732 |
+
>>> # model predicts one of the 1000 ImageNet classes
|
733 |
+
>>> predicted_class_idx = logits.argmax(-1).item()
|
734 |
+
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
|
735 |
+
Predicted class: Polaroid camera, Polaroid Land camera
|
736 |
+
```"""
|
737 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
738 |
+
|
739 |
+
outputs = self.deit(
|
740 |
+
pixel_values,
|
741 |
+
head_mask=head_mask,
|
742 |
+
output_attentions=output_attentions,
|
743 |
+
output_hidden_states=output_hidden_states,
|
744 |
+
return_dict=return_dict,
|
745 |
+
)
|
746 |
+
|
747 |
+
sequence_output = outputs[0]
|
748 |
+
|
749 |
+
logits = self.classifier(sequence_output[:, 0, :])
|
750 |
+
# we don't use the distillation token
|
751 |
+
|
752 |
+
loss = None
|
753 |
+
if labels is not None:
|
754 |
+
labels = labels.to(logits.device)
|
755 |
+
if self.config.problem_type is None:
|
756 |
+
if self.num_labels == 1:
|
757 |
+
self.config.problem_type = "regression"
|
758 |
+
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
|
759 |
+
self.config.problem_type = "single_label_classification"
|
760 |
+
else:
|
761 |
+
self.config.problem_type = "multi_label_classification"
|
762 |
+
|
763 |
+
if self.config.problem_type == "regression":
|
764 |
+
loss_fct = MSELoss()
|
765 |
+
if self.num_labels == 1:
|
766 |
+
loss = loss_fct(logits.squeeze(), labels.squeeze())
|
767 |
+
else:
|
768 |
+
loss = loss_fct(logits, labels)
|
769 |
+
elif self.config.problem_type == "single_label_classification":
|
770 |
+
loss_fct = CrossEntropyLoss()
|
771 |
+
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
|
772 |
+
elif self.config.problem_type == "multi_label_classification":
|
773 |
+
loss_fct = BCEWithLogitsLoss()
|
774 |
+
loss = loss_fct(logits, labels)
|
775 |
+
if not return_dict:
|
776 |
+
output = (logits,) + outputs[1:]
|
777 |
+
return ((loss,) + output) if loss is not None else output
|
778 |
+
|
779 |
+
return ImageClassifierOutput(
|
780 |
+
loss=loss,
|
781 |
+
logits=logits,
|
782 |
+
hidden_states=outputs.hidden_states,
|
783 |
+
attentions=outputs.attentions,
|
784 |
+
)
|
785 |
+
|
786 |
+
|
787 |
+
@dataclass
|
788 |
+
class DeiTForImageClassificationWithTeacherOutput(ModelOutput):
|
789 |
+
"""
|
790 |
+
Output type of [`DeiTForImageClassificationWithTeacher`].
|
791 |
+
|
792 |
+
Args:
|
793 |
+
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
|
794 |
+
Prediction scores as the average of the cls_logits and distillation logits.
|
795 |
+
cls_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
|
796 |
+
Prediction scores of the classification head (i.e. the linear layer on top of the final hidden state of the
|
797 |
+
class token).
|
798 |
+
distillation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
|
799 |
+
Prediction scores of the distillation head (i.e. the linear layer on top of the final hidden state of the
|
800 |
+
distillation token).
|
801 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
802 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
|
803 |
+
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
|
804 |
+
plus the initial embedding outputs.
|
805 |
+
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
|
806 |
+
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
|
807 |
+
sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
|
808 |
+
the self-attention heads.
|
809 |
+
"""
|
810 |
+
|
811 |
+
logits: torch.FloatTensor = None
|
812 |
+
cls_logits: torch.FloatTensor = None
|
813 |
+
distillation_logits: torch.FloatTensor = None
|
814 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
815 |
+
attentions: Optional[Tuple[torch.FloatTensor]] = None
|
816 |
+
|
817 |
+
|
818 |
+
@add_start_docstrings(
|
819 |
+
"""
|
820 |
+
DeiT Model transformer with image classification heads on top (a linear layer on top of the final hidden state of
|
821 |
+
the [CLS] token and a linear layer on top of the final hidden state of the distillation token) e.g. for ImageNet.
|
822 |
+
|
823 |
+
.. warning::
|
824 |
+
|
825 |
+
This model supports inference only. Fine-tuning with distillation (i.e. with a teacher) is not yet
|
826 |
+
supported.
|
827 |
+
""",
|
828 |
+
DEIT_START_DOCSTRING,
|
829 |
+
)
|
830 |
+
class DeiTForImageClassificationWithTeacher(DeiTPreTrainedModel):
|
831 |
+
def __init__(self, config: DeiTConfig) -> None:
|
832 |
+
super().__init__(config)
|
833 |
+
|
834 |
+
self.num_labels = config.num_labels
|
835 |
+
self.deit = DeiTModel(config, add_pooling_layer=False)
|
836 |
+
|
837 |
+
# Classifier heads
|
838 |
+
self.cls_classifier = (
|
839 |
+
nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
|
840 |
+
)
|
841 |
+
self.distillation_classifier = (
|
842 |
+
nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
|
843 |
+
)
|
844 |
+
|
845 |
+
# Initialize weights and apply final processing
|
846 |
+
self.post_init()
|
847 |
+
|
848 |
+
@add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
|
849 |
+
@add_code_sample_docstrings(
|
850 |
+
checkpoint=_IMAGE_CLASS_CHECKPOINT,
|
851 |
+
output_type=DeiTForImageClassificationWithTeacherOutput,
|
852 |
+
config_class=_CONFIG_FOR_DOC,
|
853 |
+
expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
|
854 |
+
)
|
855 |
+
def forward(
|
856 |
+
self,
|
857 |
+
pixel_values: Optional[torch.Tensor] = None,
|
858 |
+
head_mask: Optional[torch.Tensor] = None,
|
859 |
+
output_attentions: Optional[bool] = None,
|
860 |
+
output_hidden_states: Optional[bool] = None,
|
861 |
+
return_dict: Optional[bool] = None,
|
862 |
+
) -> Union[tuple, DeiTForImageClassificationWithTeacherOutput]:
|
863 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
864 |
+
|
865 |
+
outputs = self.deit(
|
866 |
+
pixel_values,
|
867 |
+
head_mask=head_mask,
|
868 |
+
output_attentions=output_attentions,
|
869 |
+
output_hidden_states=output_hidden_states,
|
870 |
+
return_dict=return_dict,
|
871 |
+
)
|
872 |
+
|
873 |
+
sequence_output = outputs[0]
|
874 |
+
|
875 |
+
cls_logits = self.cls_classifier(sequence_output[:, 0, :])
|
876 |
+
distillation_logits = self.distillation_classifier(sequence_output[:, 1, :])
|
877 |
+
|
878 |
+
# during inference, return the average of both classifier predictions
|
879 |
+
logits = (cls_logits + distillation_logits) / 2
|
880 |
+
|
881 |
+
if not return_dict:
|
882 |
+
output = (logits, cls_logits, distillation_logits) + outputs[1:]
|
883 |
+
return output
|
884 |
+
|
885 |
+
return DeiTForImageClassificationWithTeacherOutput(
|
886 |
+
logits=logits,
|
887 |
+
cls_logits=cls_logits,
|
888 |
+
distillation_logits=distillation_logits,
|
889 |
+
hidden_states=outputs.hidden_states,
|
890 |
+
attentions=outputs.attentions,
|
891 |
+
)
|
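A minimal inference sketch for the distilled ("with teacher") head, analogous to the classification example above and inference-only as the warning notes; the checkpoint name and image URL are the same illustrative ones used in the docstrings:

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, DeiTForImageClassificationWithTeacher

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# `logits` is the average of the class-token head and the distillation-token head.
print(model.config.id2label[outputs.logits.argmax(-1).item()])
```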
llmeval-env/lib/python3.10/site-packages/transformers/models/deit/modeling_tf_deit.py
ADDED
@@ -0,0 +1,1178 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2022 Facebook AI Research (FAIR) and The HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
""" TensorFlow DeiT model."""
|
16 |
+
|
17 |
+
|
18 |
+
from __future__ import annotations
|
19 |
+
|
20 |
+
import collections.abc
|
21 |
+
import math
|
22 |
+
from dataclasses import dataclass
|
23 |
+
from typing import Optional, Tuple, Union
|
24 |
+
|
25 |
+
import tensorflow as tf
|
26 |
+
|
27 |
+
from ...activations_tf import get_tf_activation
|
28 |
+
from ...modeling_tf_outputs import (
|
29 |
+
TFBaseModelOutput,
|
30 |
+
TFBaseModelOutputWithPooling,
|
31 |
+
TFImageClassifierOutput,
|
32 |
+
TFMaskedImageModelingOutput,
|
33 |
+
)
|
34 |
+
from ...modeling_tf_utils import (
|
35 |
+
TFPreTrainedModel,
|
36 |
+
TFSequenceClassificationLoss,
|
37 |
+
get_initializer,
|
38 |
+
keras,
|
39 |
+
keras_serializable,
|
40 |
+
unpack_inputs,
|
41 |
+
)
|
42 |
+
from ...tf_utils import shape_list, stable_softmax
|
43 |
+
from ...utils import (
|
44 |
+
ModelOutput,
|
45 |
+
add_code_sample_docstrings,
|
46 |
+
add_start_docstrings,
|
47 |
+
add_start_docstrings_to_model_forward,
|
48 |
+
logging,
|
49 |
+
replace_return_docstrings,
|
50 |
+
)
|
51 |
+
from .configuration_deit import DeiTConfig
|
52 |
+
|
53 |
+
|
54 |
+
logger = logging.get_logger(__name__)
|
55 |
+
|
56 |
+
# General docstring
|
57 |
+
_CONFIG_FOR_DOC = "DeiTConfig"
|
58 |
+
|
59 |
+
# Base docstring
|
60 |
+
_CHECKPOINT_FOR_DOC = "facebook/deit-base-distilled-patch16-224"
|
61 |
+
_EXPECTED_OUTPUT_SHAPE = [1, 198, 768]
|
62 |
+
|
63 |
+
# Image classification docstring
|
64 |
+
_IMAGE_CLASS_CHECKPOINT = "facebook/deit-base-distilled-patch16-224"
|
65 |
+
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
|
66 |
+
|
67 |
+
|
68 |
+
from ..deprecated._archive_maps import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
|
69 |
+
|
70 |
+
|
71 |
+
@dataclass
|
72 |
+
class TFDeiTForImageClassificationWithTeacherOutput(ModelOutput):
|
73 |
+
"""
|
74 |
+
Output type of [`DeiTForImageClassificationWithTeacher`].
|
75 |
+
|
76 |
+
Args:
|
77 |
+
logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
|
78 |
+
Prediction scores as the average of the cls_logits and distillation logits.
|
79 |
+
cls_logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
|
80 |
+
Prediction scores of the classification head (i.e. the linear layer on top of the final hidden state of the
|
81 |
+
class token).
|
82 |
+
distillation_logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
|
83 |
+
Prediction scores of the distillation head (i.e. the linear layer on top of the final hidden state of the
|
84 |
+
distillation token).
|
85 |
+
hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
86 |
+
Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
|
87 |
+
`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus
|
88 |
+
the initial embedding outputs.
|
89 |
+
attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
|
90 |
+
Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
|
91 |
+
sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
|
92 |
+
the self-attention heads.
|
93 |
+
"""
|
94 |
+
|
95 |
+
logits: tf.Tensor = None
|
96 |
+
cls_logits: tf.Tensor = None
|
97 |
+
distillation_logits: tf.Tensor = None
|
98 |
+
hidden_states: Tuple[tf.Tensor] | None = None
|
99 |
+
attentions: Tuple[tf.Tensor] | None = None
|
100 |
+
|
101 |
+
|
102 |
+
class TFDeiTEmbeddings(keras.layers.Layer):
|
103 |
+
"""
|
104 |
+
Construct the CLS token, distillation token, position and patch embeddings. Optionally, also the mask token.
|
105 |
+
"""
|
106 |
+
|
107 |
+
def __init__(self, config: DeiTConfig, use_mask_token: bool = False, **kwargs) -> None:
|
108 |
+
super().__init__(**kwargs)
|
109 |
+
self.config = config
|
110 |
+
self.use_mask_token = use_mask_token
|
111 |
+
self.patch_embeddings = TFDeiTPatchEmbeddings(config=config, name="patch_embeddings")
|
112 |
+
self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name="dropout")
|
113 |
+
|
114 |
+
def build(self, input_shape=None):
|
115 |
+
self.cls_token = self.add_weight(
|
116 |
+
shape=(1, 1, self.config.hidden_size),
|
117 |
+
initializer=keras.initializers.zeros(),
|
118 |
+
trainable=True,
|
119 |
+
name="cls_token",
|
120 |
+
)
|
121 |
+
self.distillation_token = self.add_weight(
|
122 |
+
shape=(1, 1, self.config.hidden_size),
|
123 |
+
initializer=keras.initializers.zeros(),
|
124 |
+
trainable=True,
|
125 |
+
name="distillation_token",
|
126 |
+
)
|
127 |
+
self.mask_token = None
|
128 |
+
if self.use_mask_token:
|
129 |
+
self.mask_token = self.add_weight(
|
130 |
+
shape=(1, 1, self.config.hidden_size),
|
131 |
+
initializer=keras.initializers.zeros(),
|
132 |
+
trainable=True,
|
133 |
+
name="mask_token",
|
134 |
+
)
|
135 |
+
num_patches = self.patch_embeddings.num_patches
|
136 |
+
self.position_embeddings = self.add_weight(
|
137 |
+
shape=(1, num_patches + 2, self.config.hidden_size),
|
138 |
+
initializer=keras.initializers.zeros(),
|
139 |
+
trainable=True,
|
140 |
+
name="position_embeddings",
|
141 |
+
)
|
142 |
+
|
143 |
+
if self.built:
|
144 |
+
return
|
145 |
+
self.built = True
|
146 |
+
if getattr(self, "patch_embeddings", None) is not None:
|
147 |
+
with tf.name_scope(self.patch_embeddings.name):
|
148 |
+
self.patch_embeddings.build(None)
|
149 |
+
if getattr(self, "dropout", None) is not None:
|
150 |
+
with tf.name_scope(self.dropout.name):
|
151 |
+
self.dropout.build(None)
|
152 |
+
|
153 |
+
def call(
|
154 |
+
self, pixel_values: tf.Tensor, bool_masked_pos: tf.Tensor | None = None, training: bool = False
|
155 |
+
) -> tf.Tensor:
|
156 |
+
embeddings = self.patch_embeddings(pixel_values)
|
157 |
+
batch_size, seq_length, _ = shape_list(embeddings)
|
158 |
+
|
159 |
+
if bool_masked_pos is not None:
|
160 |
+
mask_tokens = tf.tile(self.mask_token, [batch_size, seq_length, 1])
|
161 |
+
# replace the masked visual tokens by mask_tokens
|
162 |
+
mask = tf.expand_dims(bool_masked_pos, axis=-1)
|
163 |
+
mask = tf.cast(mask, dtype=mask_tokens.dtype)
|
164 |
+
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
|
165 |
+
|
166 |
+
cls_tokens = tf.repeat(self.cls_token, repeats=batch_size, axis=0)
|
167 |
+
distillation_tokens = tf.repeat(self.distillation_token, repeats=batch_size, axis=0)
|
168 |
+
embeddings = tf.concat((cls_tokens, distillation_tokens, embeddings), axis=1)
|
169 |
+
embeddings = embeddings + self.position_embeddings
|
170 |
+
embeddings = self.dropout(embeddings, training=training)
|
171 |
+
return embeddings
|
172 |
+
|
173 |
+
|
174 |
+
class TFDeiTPatchEmbeddings(keras.layers.Layer):
|
175 |
+
"""
|
176 |
+
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
|
177 |
+
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
|
178 |
+
Transformer.
|
179 |
+
"""
|
180 |
+
|
181 |
+
def __init__(self, config: DeiTConfig, **kwargs) -> None:
|
182 |
+
super().__init__(**kwargs)
|
183 |
+
image_size, patch_size = config.image_size, config.patch_size
|
184 |
+
num_channels, hidden_size = config.num_channels, config.hidden_size
|
185 |
+
|
186 |
+
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
|
187 |
+
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
|
188 |
+
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
|
189 |
+
self.image_size = image_size
|
190 |
+
self.patch_size = patch_size
|
191 |
+
self.num_channels = num_channels
|
192 |
+
self.num_patches = num_patches
|
193 |
+
|
194 |
+
self.projection = keras.layers.Conv2D(
|
195 |
+
hidden_size, kernel_size=patch_size, strides=patch_size, name="projection"
|
196 |
+
)
|
197 |
+
|
198 |
+
def call(self, pixel_values: tf.Tensor) -> tf.Tensor:
|
199 |
+
batch_size, height, width, num_channels = shape_list(pixel_values)
|
200 |
+
if tf.executing_eagerly() and num_channels != self.num_channels:
|
201 |
+
raise ValueError(
|
202 |
+
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
|
203 |
+
)
|
204 |
+
if tf.executing_eagerly() and (height != self.image_size[0] or width != self.image_size[1]):
|
205 |
+
raise ValueError(
|
206 |
+
f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
|
207 |
+
)
|
208 |
+
x = self.projection(pixel_values)
|
209 |
+
batch_size, height, width, num_channels = shape_list(x)
|
210 |
+
x = tf.reshape(x, (batch_size, height * width, num_channels))
|
211 |
+
return x
|
212 |
+
|
213 |
+
def build(self, input_shape=None):
|
214 |
+
if self.built:
|
215 |
+
return
|
216 |
+
self.built = True
|
217 |
+
if getattr(self, "projection", None) is not None:
|
218 |
+
with tf.name_scope(self.projection.name):
|
219 |
+
self.projection.build([None, None, None, self.num_channels])
|
220 |
+
|
221 |
+
|
222 |
+
# Copied from transformers.models.vit.modeling_tf_vit.TFViTSelfAttention with ViT->DeiT
|
223 |
+
class TFDeiTSelfAttention(keras.layers.Layer):
|
224 |
+
def __init__(self, config: DeiTConfig, **kwargs):
|
225 |
+
super().__init__(**kwargs)
|
226 |
+
|
227 |
+
if config.hidden_size % config.num_attention_heads != 0:
|
228 |
+
raise ValueError(
|
229 |
+
f"The hidden size ({config.hidden_size}) is not a multiple of the number "
|
230 |
+
f"of attention heads ({config.num_attention_heads})"
|
231 |
+
)
|
232 |
+
|
233 |
+
self.num_attention_heads = config.num_attention_heads
|
234 |
+
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
|
235 |
+
self.all_head_size = self.num_attention_heads * self.attention_head_size
|
236 |
+
self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
|
237 |
+
|
238 |
+
self.query = keras.layers.Dense(
|
239 |
+
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
|
240 |
+
)
|
241 |
+
self.key = keras.layers.Dense(
|
242 |
+
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
|
243 |
+
)
|
244 |
+
self.value = keras.layers.Dense(
|
245 |
+
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
|
246 |
+
)
|
247 |
+
self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
|
248 |
+
self.config = config
|
249 |
+
|
250 |
+
def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
|
251 |
+
# Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
|
252 |
+
tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
|
253 |
+
|
254 |
+
# Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
|
255 |
+
return tf.transpose(tensor, perm=[0, 2, 1, 3])
|
256 |
+
|
257 |
+
def call(
|
258 |
+
self,
|
259 |
+
hidden_states: tf.Tensor,
|
260 |
+
head_mask: tf.Tensor,
|
261 |
+
output_attentions: bool,
|
262 |
+
training: bool = False,
|
263 |
+
) -> Tuple[tf.Tensor]:
|
264 |
+
batch_size = shape_list(hidden_states)[0]
|
265 |
+
mixed_query_layer = self.query(inputs=hidden_states)
|
266 |
+
mixed_key_layer = self.key(inputs=hidden_states)
|
267 |
+
mixed_value_layer = self.value(inputs=hidden_states)
|
268 |
+
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
|
269 |
+
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
|
270 |
+
value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
|
271 |
+
|
272 |
+
# Take the dot product between "query" and "key" to get the raw attention scores.
|
273 |
+
# (batch size, num_heads, seq_len_q, seq_len_k)
|
274 |
+
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
|
275 |
+
dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
|
276 |
+
attention_scores = tf.divide(attention_scores, dk)
|
277 |
+
|
278 |
+
# Normalize the attention scores to probabilities.
|
279 |
+
attention_probs = stable_softmax(logits=attention_scores, axis=-1)
|
280 |
+
|
281 |
+
# This is actually dropping out entire tokens to attend to, which might
|
282 |
+
# seem a bit unusual, but is taken from the original Transformer paper.
|
283 |
+
attention_probs = self.dropout(inputs=attention_probs, training=training)
|
284 |
+
|
285 |
+
# Mask heads if we want to
|
286 |
+
if head_mask is not None:
|
287 |
+
attention_probs = tf.multiply(attention_probs, head_mask)
|
288 |
+
|
289 |
+
attention_output = tf.matmul(attention_probs, value_layer)
|
290 |
+
attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
|
291 |
+
|
292 |
+
# (batch_size, seq_len_q, all_head_size)
|
293 |
+
attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
|
294 |
+
outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
|
295 |
+
|
296 |
+
return outputs
|
297 |
+
|
298 |
+
def build(self, input_shape=None):
|
299 |
+
if self.built:
|
300 |
+
return
|
301 |
+
self.built = True
|
302 |
+
if getattr(self, "query", None) is not None:
|
303 |
+
with tf.name_scope(self.query.name):
|
304 |
+
self.query.build([None, None, self.config.hidden_size])
|
305 |
+
if getattr(self, "key", None) is not None:
|
306 |
+
with tf.name_scope(self.key.name):
|
307 |
+
self.key.build([None, None, self.config.hidden_size])
|
308 |
+
if getattr(self, "value", None) is not None:
|
309 |
+
with tf.name_scope(self.value.name):
|
310 |
+
self.value.build([None, None, self.config.hidden_size])
|
311 |
+
|
312 |
+
|
313 |
+
# Copied from transformers.models.vit.modeling_tf_vit.TFViTSelfOutput with ViT->DeiT
|
314 |
+
class TFDeiTSelfOutput(keras.layers.Layer):
|
315 |
+
"""
|
316 |
+
The residual connection is defined in TFDeiTLayer instead of here (as is the case with other models), due to the
|
317 |
+
layernorm applied before each block.
|
318 |
+
"""
|
319 |
+
|
320 |
+
def __init__(self, config: DeiTConfig, **kwargs):
|
321 |
+
super().__init__(**kwargs)
|
322 |
+
|
323 |
+
self.dense = keras.layers.Dense(
|
324 |
+
units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
|
325 |
+
)
|
326 |
+
self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
|
327 |
+
self.config = config
|
328 |
+
|
329 |
+
def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
|
330 |
+
hidden_states = self.dense(inputs=hidden_states)
|
331 |
+
hidden_states = self.dropout(inputs=hidden_states, training=training)
|
332 |
+
|
333 |
+
return hidden_states
|
334 |
+
|
335 |
+
def build(self, input_shape=None):
|
336 |
+
if self.built:
|
337 |
+
return
|
338 |
+
self.built = True
|
339 |
+
if getattr(self, "dense", None) is not None:
|
340 |
+
with tf.name_scope(self.dense.name):
|
341 |
+
self.dense.build([None, None, self.config.hidden_size])
|
342 |
+
|
343 |
+
|
344 |
+
# Copied from transformers.models.vit.modeling_tf_vit.TFViTAttention with ViT->DeiT
|
345 |
+
class TFDeiTAttention(keras.layers.Layer):
|
346 |
+
def __init__(self, config: DeiTConfig, **kwargs):
|
347 |
+
super().__init__(**kwargs)
|
348 |
+
|
349 |
+
self.self_attention = TFDeiTSelfAttention(config, name="attention")
|
350 |
+
self.dense_output = TFDeiTSelfOutput(config, name="output")
|
351 |
+
|
352 |
+
def prune_heads(self, heads):
|
353 |
+
raise NotImplementedError
|
354 |
+
|
355 |
+
def call(
|
356 |
+
self,
|
357 |
+
input_tensor: tf.Tensor,
|
358 |
+
head_mask: tf.Tensor,
|
359 |
+
output_attentions: bool,
|
360 |
+
training: bool = False,
|
361 |
+
) -> Tuple[tf.Tensor]:
|
362 |
+
self_outputs = self.self_attention(
|
363 |
+
hidden_states=input_tensor, head_mask=head_mask, output_attentions=output_attentions, training=training
|
364 |
+
)
|
365 |
+
attention_output = self.dense_output(
|
366 |
+
hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
|
367 |
+
)
|
368 |
+
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
|
369 |
+
|
370 |
+
return outputs
|
371 |
+
|
372 |
+
def build(self, input_shape=None):
|
373 |
+
if self.built:
|
374 |
+
return
|
375 |
+
self.built = True
|
376 |
+
if getattr(self, "self_attention", None) is not None:
|
377 |
+
with tf.name_scope(self.self_attention.name):
|
378 |
+
self.self_attention.build(None)
|
379 |
+
if getattr(self, "dense_output", None) is not None:
|
380 |
+
with tf.name_scope(self.dense_output.name):
|
381 |
+
self.dense_output.build(None)
|
382 |
+
|
383 |
+
|
384 |
+
# Copied from transformers.models.vit.modeling_tf_vit.TFViTIntermediate with ViT->DeiT
|
385 |
+
class TFDeiTIntermediate(keras.layers.Layer):
|
386 |
+
def __init__(self, config: DeiTConfig, **kwargs):
|
387 |
+
super().__init__(**kwargs)
|
388 |
+
|
389 |
+
self.dense = keras.layers.Dense(
|
390 |
+
units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
|
391 |
+
)
|
392 |
+
|
393 |
+
if isinstance(config.hidden_act, str):
|
394 |
+
self.intermediate_act_fn = get_tf_activation(config.hidden_act)
|
395 |
+
else:
|
396 |
+
self.intermediate_act_fn = config.hidden_act
|
397 |
+
self.config = config
|
398 |
+
|
399 |
+
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
|
400 |
+
hidden_states = self.dense(inputs=hidden_states)
|
401 |
+
hidden_states = self.intermediate_act_fn(hidden_states)
|
402 |
+
|
403 |
+
return hidden_states
|
404 |
+
|
405 |
+
def build(self, input_shape=None):
|
406 |
+
if self.built:
|
407 |
+
return
|
408 |
+
self.built = True
|
409 |
+
if getattr(self, "dense", None) is not None:
|
410 |
+
with tf.name_scope(self.dense.name):
|
411 |
+
self.dense.build([None, None, self.config.hidden_size])
|
412 |
+
|
413 |
+
|
414 |
+
# Copied from transformers.models.vit.modeling_tf_vit.TFViTOutput with ViT->DeiT
|
415 |
+
class TFDeiTOutput(keras.layers.Layer):
|
416 |
+
def __init__(self, config: DeiTConfig, **kwargs):
|
417 |
+
super().__init__(**kwargs)
|
418 |
+
|
419 |
+
self.dense = keras.layers.Dense(
|
420 |
+
units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
|
421 |
+
)
|
422 |
+
self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
|
423 |
+
self.config = config
|
424 |
+
|
425 |
+
def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
|
426 |
+
hidden_states = self.dense(inputs=hidden_states)
|
427 |
+
hidden_states = self.dropout(inputs=hidden_states, training=training)
|
428 |
+
hidden_states = hidden_states + input_tensor
|
429 |
+
|
430 |
+
return hidden_states
|
431 |
+
|
432 |
+
def build(self, input_shape=None):
|
433 |
+
if self.built:
|
434 |
+
return
|
435 |
+
self.built = True
|
436 |
+
if getattr(self, "dense", None) is not None:
|
437 |
+
with tf.name_scope(self.dense.name):
|
438 |
+
self.dense.build([None, None, self.config.intermediate_size])
|
439 |
+
|
440 |
+
|
441 |
+
class TFDeiTLayer(keras.layers.Layer):
|
442 |
+
"""This corresponds to the Block class in the timm implementation."""
|
443 |
+
|
444 |
+
def __init__(self, config: DeiTConfig, **kwargs):
|
445 |
+
super().__init__(**kwargs)
|
446 |
+
|
447 |
+
self.attention = TFDeiTAttention(config, name="attention")
|
448 |
+
self.intermediate = TFDeiTIntermediate(config, name="intermediate")
|
449 |
+
self.deit_output = TFDeiTOutput(config, name="output")
|
450 |
+
|
451 |
+
self.layernorm_before = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_before")
|
452 |
+
self.layernorm_after = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_after")
|
453 |
+
self.config = config
|
454 |
+
|
455 |
+
def call(
|
456 |
+
self,
|
457 |
+
hidden_states: tf.Tensor,
|
458 |
+
head_mask: tf.Tensor,
|
459 |
+
output_attentions: bool,
|
460 |
+
training: bool = False,
|
461 |
+
) -> Tuple[tf.Tensor]:
|
462 |
+
attention_outputs = self.attention(
|
463 |
+
# in DeiT, layernorm is applied before self-attention
|
464 |
+
input_tensor=self.layernorm_before(inputs=hidden_states, training=training),
|
465 |
+
head_mask=head_mask,
|
466 |
+
output_attentions=output_attentions,
|
467 |
+
training=training,
|
468 |
+
)
|
469 |
+
attention_output = attention_outputs[0]
|
470 |
+
|
471 |
+
# first residual connection
|
472 |
+
hidden_states = attention_output + hidden_states
|
473 |
+
|
474 |
+
# in DeiT, layernorm is also applied after self-attention
|
475 |
+
layer_output = self.layernorm_after(inputs=hidden_states, training=training)
|
476 |
+
|
477 |
+
intermediate_output = self.intermediate(hidden_states=layer_output, training=training)
|
478 |
+
|
479 |
+
# second residual connection is done here
|
480 |
+
layer_output = self.deit_output(
|
481 |
+
hidden_states=intermediate_output, input_tensor=hidden_states, training=training
|
482 |
+
)
|
483 |
+
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
|
484 |
+
|
485 |
+
return outputs
|
486 |
+
|
487 |
+
def build(self, input_shape=None):
|
488 |
+
if self.built:
|
489 |
+
return
|
490 |
+
self.built = True
|
491 |
+
if getattr(self, "attention", None) is not None:
|
492 |
+
with tf.name_scope(self.attention.name):
|
493 |
+
self.attention.build(None)
|
494 |
+
if getattr(self, "intermediate", None) is not None:
|
495 |
+
with tf.name_scope(self.intermediate.name):
|
496 |
+
self.intermediate.build(None)
|
497 |
+
if getattr(self, "deit_output", None) is not None:
|
498 |
+
with tf.name_scope(self.deit_output.name):
|
499 |
+
self.deit_output.build(None)
|
500 |
+
if getattr(self, "layernorm_before", None) is not None:
|
501 |
+
with tf.name_scope(self.layernorm_before.name):
|
502 |
+
self.layernorm_before.build([None, None, self.config.hidden_size])
|
503 |
+
if getattr(self, "layernorm_after", None) is not None:
|
504 |
+
with tf.name_scope(self.layernorm_after.name):
|
505 |
+
self.layernorm_after.build([None, None, self.config.hidden_size])
|
506 |
+
|
507 |
+
|
508 |
+
# Copied from transformers.models.vit.modeling_tf_vit.TFViTEncoder with ViT->DeiT
|
509 |
+
class TFDeiTEncoder(keras.layers.Layer):
|
510 |
+
def __init__(self, config: DeiTConfig, **kwargs):
|
511 |
+
super().__init__(**kwargs)
|
512 |
+
|
513 |
+
self.layer = [TFDeiTLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
|
514 |
+
|
515 |
+
def call(
|
516 |
+
self,
|
517 |
+
hidden_states: tf.Tensor,
|
518 |
+
head_mask: tf.Tensor,
|
519 |
+
output_attentions: bool,
|
520 |
+
output_hidden_states: bool,
|
521 |
+
return_dict: bool,
|
522 |
+
training: bool = False,
|
523 |
+
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
|
524 |
+
all_hidden_states = () if output_hidden_states else None
|
525 |
+
all_attentions = () if output_attentions else None
|
526 |
+
|
527 |
+
for i, layer_module in enumerate(self.layer):
|
528 |
+
if output_hidden_states:
|
529 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
530 |
+
|
531 |
+
layer_outputs = layer_module(
|
532 |
+
hidden_states=hidden_states,
|
533 |
+
head_mask=head_mask[i],
|
534 |
+
output_attentions=output_attentions,
|
535 |
+
training=training,
|
536 |
+
)
|
537 |
+
hidden_states = layer_outputs[0]
|
538 |
+
|
539 |
+
if output_attentions:
|
540 |
+
all_attentions = all_attentions + (layer_outputs[1],)
|
541 |
+
|
542 |
+
# Add last layer
|
543 |
+
if output_hidden_states:
|
544 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
545 |
+
|
546 |
+
if not return_dict:
|
547 |
+
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
|
548 |
+
|
549 |
+
return TFBaseModelOutput(
|
550 |
+
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
|
551 |
+
)
|
552 |
+
|
553 |
+
def build(self, input_shape=None):
|
554 |
+
if self.built:
|
555 |
+
return
|
556 |
+
self.built = True
|
557 |
+
if getattr(self, "layer", None) is not None:
|
558 |
+
for layer in self.layer:
|
559 |
+
with tf.name_scope(layer.name):
|
560 |
+
layer.build(None)
|
561 |
+
|
562 |
+
|
563 |
+
@keras_serializable
|
564 |
+
class TFDeiTMainLayer(keras.layers.Layer):
|
565 |
+
config_class = DeiTConfig
|
566 |
+
|
567 |
+
def __init__(
|
568 |
+
self, config: DeiTConfig, add_pooling_layer: bool = True, use_mask_token: bool = False, **kwargs
|
569 |
+
) -> None:
|
570 |
+
super().__init__(**kwargs)
|
571 |
+
self.config = config
|
572 |
+
|
573 |
+
self.embeddings = TFDeiTEmbeddings(config, use_mask_token=use_mask_token, name="embeddings")
|
574 |
+
self.encoder = TFDeiTEncoder(config, name="encoder")
|
575 |
+
|
576 |
+
self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
|
577 |
+
self.pooler = TFDeiTPooler(config, name="pooler") if add_pooling_layer else None
|
578 |
+
|
579 |
+
def get_input_embeddings(self) -> TFDeiTPatchEmbeddings:
|
580 |
+
return self.embeddings.patch_embeddings
|
581 |
+
|
582 |
+
def _prune_heads(self, heads_to_prune):
|
583 |
+
"""
|
584 |
+
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
|
585 |
+
class PreTrainedModel
|
586 |
+
"""
|
587 |
+
raise NotImplementedError
|
588 |
+
|
589 |
+
def get_head_mask(self, head_mask):
|
590 |
+
if head_mask is not None:
|
591 |
+
raise NotImplementedError
|
592 |
+
else:
|
593 |
+
head_mask = [None] * self.config.num_hidden_layers
|
594 |
+
|
595 |
+
return head_mask
|
596 |
+
|
597 |
+
@unpack_inputs
|
598 |
+
def call(
|
599 |
+
self,
|
600 |
+
pixel_values: tf.Tensor | None = None,
|
601 |
+
bool_masked_pos: tf.Tensor | None = None,
|
602 |
+
head_mask: tf.Tensor | None = None,
|
603 |
+
output_attentions: Optional[bool] = None,
|
604 |
+
output_hidden_states: Optional[bool] = None,
|
605 |
+
return_dict: Optional[bool] = None,
|
606 |
+
training: bool = False,
|
607 |
+
) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor, ...]]:
|
608 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
609 |
+
output_hidden_states = (
|
610 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
611 |
+
)
|
612 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
613 |
+
|
614 |
+
if pixel_values is None:
|
615 |
+
raise ValueError("You have to specify pixel_values")
|
616 |
+
|
617 |
+
# TF 2.0 image layers can't use NCHW format when running on CPU.
|
618 |
+
# (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels)
|
619 |
+
pixel_values = tf.transpose(pixel_values, (0, 2, 3, 1))
|
620 |
+
|
621 |
+
# Prepare head mask if needed
|
622 |
+
# 1.0 in head_mask indicate we keep the head
|
623 |
+
# attention_probs has shape bsz x n_heads x N x N
|
624 |
+
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
|
625 |
+
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
|
626 |
+
head_mask = self.get_head_mask(head_mask)
|
627 |
+
|
628 |
+
embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos, training=training)
|
629 |
+
|
630 |
+
encoder_outputs = self.encoder(
|
631 |
+
embedding_output,
|
632 |
+
head_mask=head_mask,
|
633 |
+
output_attentions=output_attentions,
|
634 |
+
output_hidden_states=output_hidden_states,
|
635 |
+
return_dict=return_dict,
|
636 |
+
training=training,
|
637 |
+
)
|
638 |
+
sequence_output = encoder_outputs[0]
|
639 |
+
sequence_output = self.layernorm(sequence_output, training=training)
|
640 |
+
pooled_output = self.pooler(sequence_output, training=training) if self.pooler is not None else None
|
641 |
+
|
642 |
+
if not return_dict:
|
643 |
+
head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
|
644 |
+
return head_outputs + encoder_outputs[1:]
|
645 |
+
|
646 |
+
return TFBaseModelOutputWithPooling(
|
647 |
+
last_hidden_state=sequence_output,
|
648 |
+
pooler_output=pooled_output,
|
649 |
+
hidden_states=encoder_outputs.hidden_states,
|
650 |
+
attentions=encoder_outputs.attentions,
|
651 |
+
)
|
652 |
+
|
653 |
+
def build(self, input_shape=None):
|
654 |
+
if self.built:
|
655 |
+
return
|
656 |
+
self.built = True
|
657 |
+
if getattr(self, "embeddings", None) is not None:
|
658 |
+
with tf.name_scope(self.embeddings.name):
|
659 |
+
self.embeddings.build(None)
|
660 |
+
if getattr(self, "encoder", None) is not None:
|
661 |
+
with tf.name_scope(self.encoder.name):
|
662 |
+
self.encoder.build(None)
|
663 |
+
if getattr(self, "layernorm", None) is not None:
|
664 |
+
with tf.name_scope(self.layernorm.name):
|
665 |
+
self.layernorm.build([None, None, self.config.hidden_size])
|
666 |
+
if getattr(self, "pooler", None) is not None:
|
667 |
+
with tf.name_scope(self.pooler.name):
|
668 |
+
self.pooler.build(None)
|
669 |
+
|
670 |
+
|
671 |
+
# Copied from transformers.models.vit.modeling_tf_vit.TFViTPreTrainedModel with ViT->DeiT all-casing
|
672 |
+
class TFDeiTPreTrainedModel(TFPreTrainedModel):
|
673 |
+
"""
|
674 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
675 |
+
models.
|
676 |
+
"""
|
677 |
+
|
678 |
+
config_class = DeiTConfig
|
679 |
+
base_model_prefix = "deit"
|
680 |
+
main_input_name = "pixel_values"
|
681 |
+
|
682 |
+
|
683 |
+
DEIT_START_DOCSTRING = r"""
|
684 |
+
This model is a TensorFlow
|
685 |
+
[keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer). Use it as a regular
|
686 |
+
TensorFlow Module and refer to the TensorFlow documentation for all matter related to general usage and behavior.
|
687 |
+
|
688 |
+
Parameters:
|
689 |
+
config ([`DeiTConfig`]): Model configuration class with all the parameters of the model.
|
690 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
691 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
692 |
+
"""
|
693 |
+
|
694 |
+
DEIT_INPUTS_DOCSTRING = r"""
|
695 |
+
Args:
|
696 |
+
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
|
697 |
+
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
|
698 |
+
[`DeiTImageProcessor.__call__`] for details.
|
699 |
+
|
700 |
+
head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
|
701 |
+
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
|
702 |
+
|
703 |
+
- 1 indicates the head is **not masked**,
|
704 |
+
- 0 indicates the head is **masked**.
|
705 |
+
|
706 |
+
output_attentions (`bool`, *optional*):
|
707 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
708 |
+
tensors for more detail.
|
709 |
+
output_hidden_states (`bool`, *optional*):
|
710 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
711 |
+
more detail.
|
712 |
+
return_dict (`bool`, *optional*):
|
713 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
714 |
+
"""
|
715 |
+
|
716 |
+
|
717 |
+
@add_start_docstrings(
|
718 |
+
"The bare DeiT Model transformer outputting raw hidden-states without any specific head on top.",
|
719 |
+
DEIT_START_DOCSTRING,
|
720 |
+
)
|
721 |
+
class TFDeiTModel(TFDeiTPreTrainedModel):
|
722 |
+
def __init__(
|
723 |
+
self, config: DeiTConfig, add_pooling_layer: bool = True, use_mask_token: bool = False, **kwargs
|
724 |
+
) -> None:
|
725 |
+
super().__init__(config, **kwargs)
|
726 |
+
|
727 |
+
self.deit = TFDeiTMainLayer(
|
728 |
+
config, add_pooling_layer=add_pooling_layer, use_mask_token=use_mask_token, name="deit"
|
729 |
+
)
|
730 |
+
|
731 |
+
@unpack_inputs
|
732 |
+
@add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
|
733 |
+
@add_code_sample_docstrings(
|
734 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
735 |
+
output_type=TFBaseModelOutputWithPooling,
|
736 |
+
config_class=_CONFIG_FOR_DOC,
|
737 |
+
modality="vision",
|
738 |
+
expected_output=_EXPECTED_OUTPUT_SHAPE,
|
739 |
+
)
|
740 |
+
def call(
|
741 |
+
self,
|
742 |
+
pixel_values: tf.Tensor | None = None,
|
743 |
+
bool_masked_pos: tf.Tensor | None = None,
|
744 |
+
head_mask: tf.Tensor | None = None,
|
745 |
+
output_attentions: Optional[bool] = None,
|
746 |
+
output_hidden_states: Optional[bool] = None,
|
747 |
+
return_dict: Optional[bool] = None,
|
748 |
+
training: bool = False,
|
749 |
+
) -> Union[Tuple, TFBaseModelOutputWithPooling]:
|
750 |
+
outputs = self.deit(
|
751 |
+
pixel_values=pixel_values,
|
752 |
+
bool_masked_pos=bool_masked_pos,
|
753 |
+
head_mask=head_mask,
|
754 |
+
output_attentions=output_attentions,
|
755 |
+
output_hidden_states=output_hidden_states,
|
756 |
+
return_dict=return_dict,
|
757 |
+
training=training,
|
758 |
+
)
|
759 |
+
return outputs
|
760 |
+
|
761 |
+
def build(self, input_shape=None):
|
762 |
+
if self.built:
|
763 |
+
return
|
764 |
+
self.built = True
|
765 |
+
if getattr(self, "deit", None) is not None:
|
766 |
+
with tf.name_scope(self.deit.name):
|
767 |
+
self.deit.build(None)
|
768 |
+
|
769 |
+
|
770 |
+
# Copied from transformers.models.vit.modeling_tf_vit.TFViTPooler with ViT->DeiT
|
771 |
+
class TFDeiTPooler(keras.layers.Layer):
|
772 |
+
def __init__(self, config: DeiTConfig, **kwargs):
|
773 |
+
super().__init__(**kwargs)
|
774 |
+
|
775 |
+
self.dense = keras.layers.Dense(
|
776 |
+
units=config.hidden_size,
|
777 |
+
kernel_initializer=get_initializer(config.initializer_range),
|
778 |
+
activation="tanh",
|
779 |
+
name="dense",
|
780 |
+
)
|
781 |
+
self.config = config
|
782 |
+
|
783 |
+
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
|
784 |
+
# We "pool" the model by simply taking the hidden state corresponding
|
785 |
+
# to the first token.
|
786 |
+
first_token_tensor = hidden_states[:, 0]
|
787 |
+
pooled_output = self.dense(inputs=first_token_tensor)
|
788 |
+
|
789 |
+
return pooled_output
|
790 |
+
|
791 |
+
def build(self, input_shape=None):
|
792 |
+
if self.built:
|
793 |
+
return
|
794 |
+
self.built = True
|
795 |
+
if getattr(self, "dense", None) is not None:
|
796 |
+
with tf.name_scope(self.dense.name):
|
797 |
+
self.dense.build([None, None, self.config.hidden_size])
|
798 |
+
|
799 |
+
|
800 |
+
class TFDeitPixelShuffle(keras.layers.Layer):
|
801 |
+
"""TF layer implementation of torch.nn.PixelShuffle"""
|
802 |
+
|
803 |
+
def __init__(self, upscale_factor: int, **kwargs) -> None:
|
804 |
+
super().__init__(**kwargs)
|
805 |
+
if not isinstance(upscale_factor, int) or upscale_factor < 2:
|
806 |
+
raise ValueError(f"upscale_factor must be an integer value >= 2 got {upscale_factor}")
|
807 |
+
self.upscale_factor = upscale_factor
|
808 |
+
|
809 |
+
def call(self, x: tf.Tensor) -> tf.Tensor:
|
810 |
+
hidden_states = x
|
811 |
+
batch_size, _, _, num_input_channels = shape_list(hidden_states)
|
812 |
+
block_size_squared = self.upscale_factor**2
|
813 |
+
output_depth = int(num_input_channels / block_size_squared)
|
814 |
+
# When the number of output channels >= 2, PyTorch's PixelShuffle and
|
815 |
+
# TF's depth_to_space differ in their output as the order of channels selected for combining
|
816 |
+
# is a permutation of the other c.f.
|
817 |
+
# https://stackoverflow.com/questions/68272502/tf-depth-to-space-not-same-as-torchs-pixelshuffle-when-output-channels-1
|
818 |
+
permutation = tf.constant(
|
819 |
+
[[i + j * block_size_squared for i in range(block_size_squared) for j in range(output_depth)]]
|
820 |
+
)
|
821 |
+
hidden_states = tf.gather(params=hidden_states, indices=tf.tile(permutation, [batch_size, 1]), batch_dims=-1)
|
822 |
+
hidden_states = tf.nn.depth_to_space(hidden_states, block_size=self.upscale_factor, data_format="NHWC")
|
823 |
+
return hidden_states
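A quick way to see why the channel permutation above is needed is to compare this layer against `torch.nn.PixelShuffle` directly. The snippet below is only an illustrative check, not part of the original file; it assumes `torch` and NumPy are available alongside TensorFlow, which the modeling file itself does not require:

```python
import numpy as np
import tensorflow as tf
import torch

r = 2  # upscale factor
x = np.random.rand(1, r * r * 3, 4, 4).astype(np.float32)  # NCHW, 12 channels -> 3 after shuffle

# PyTorch reference: (1, 12, 4, 4) -> (1, 3, 8, 8)
reference = torch.nn.PixelShuffle(r)(torch.from_numpy(x)).numpy()

# TF path used in this file: NCHW -> NHWC, permuted depth_to_space, NHWC -> NCHW
x_nhwc = tf.transpose(tf.constant(x), (0, 2, 3, 1))
y_nhwc = TFDeitPixelShuffle(upscale_factor=r)(x_nhwc)
y = tf.transpose(y_nhwc, (0, 3, 1, 2)).numpy()

print(np.allclose(reference, y, atol=1e-6))  # expected: True
```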
|
824 |
+
|
825 |
+
|
826 |
+
class TFDeitDecoder(keras.layers.Layer):
|
827 |
+
def __init__(self, config: DeiTConfig, **kwargs) -> None:
|
828 |
+
super().__init__(**kwargs)
|
829 |
+
self.conv2d = keras.layers.Conv2D(
|
830 |
+
filters=config.encoder_stride**2 * config.num_channels, kernel_size=1, name="0"
|
831 |
+
)
|
832 |
+
self.pixel_shuffle = TFDeitPixelShuffle(config.encoder_stride, name="1")
|
833 |
+
self.config = config
|
834 |
+
|
835 |
+
def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
|
836 |
+
hidden_states = inputs
|
837 |
+
hidden_states = self.conv2d(hidden_states)
|
838 |
+
hidden_states = self.pixel_shuffle(hidden_states)
|
839 |
+
return hidden_states
|
840 |
+
|
841 |
+
def build(self, input_shape=None):
|
842 |
+
if self.built:
|
843 |
+
return
|
844 |
+
self.built = True
|
845 |
+
if getattr(self, "conv2d", None) is not None:
|
846 |
+
with tf.name_scope(self.conv2d.name):
|
847 |
+
self.conv2d.build([None, None, None, self.config.hidden_size])
|
848 |
+
if getattr(self, "pixel_shuffle", None) is not None:
|
849 |
+
with tf.name_scope(self.pixel_shuffle.name):
|
850 |
+
self.pixel_shuffle.build(None)
|
851 |
+
|
852 |
+
|
853 |
+
@add_start_docstrings(
|
854 |
+
"DeiT Model with a decoder on top for masked image modeling, as proposed in"
|
855 |
+
" [SimMIM](https://arxiv.org/abs/2111.09886).",
|
856 |
+
DEIT_START_DOCSTRING,
|
857 |
+
)
|
858 |
+
class TFDeiTForMaskedImageModeling(TFDeiTPreTrainedModel):
|
859 |
+
def __init__(self, config: DeiTConfig) -> None:
|
860 |
+
super().__init__(config)
|
861 |
+
|
862 |
+
self.deit = TFDeiTMainLayer(config, add_pooling_layer=False, use_mask_token=True, name="deit")
|
863 |
+
self.decoder = TFDeitDecoder(config, name="decoder")
|
864 |
+
|
865 |
+
@unpack_inputs
|
866 |
+
@add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
|
867 |
+
@replace_return_docstrings(output_type=TFMaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC)
|
868 |
+
def call(
|
869 |
+
self,
|
870 |
+
pixel_values: tf.Tensor | None = None,
|
871 |
+
bool_masked_pos: tf.Tensor | None = None,
|
872 |
+
head_mask: tf.Tensor | None = None,
|
873 |
+
output_attentions: Optional[bool] = None,
|
874 |
+
output_hidden_states: Optional[bool] = None,
|
875 |
+
return_dict: Optional[bool] = None,
|
876 |
+
training: bool = False,
|
877 |
+
) -> Union[tuple, TFMaskedImageModelingOutput]:
|
878 |
+
r"""
|
879 |
+
bool_masked_pos (`tf.Tensor` of type bool and shape `(batch_size, num_patches)`):
|
880 |
+
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
|
881 |
+
|
882 |
+
Returns:
|
883 |
+
|
884 |
+
Examples:
|
885 |
+
```python
|
886 |
+
>>> from transformers import AutoImageProcessor, TFDeiTForMaskedImageModeling
|
887 |
+
>>> import tensorflow as tf
|
888 |
+
>>> from PIL import Image
|
889 |
+
>>> import requests
|
890 |
+
|
891 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
892 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
893 |
+
|
894 |
+
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
|
895 |
+
>>> model = TFDeiTForMaskedImageModeling.from_pretrained("facebook/deit-base-distilled-patch16-224")
|
896 |
+
|
897 |
+
>>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
|
898 |
+
>>> pixel_values = image_processor(images=image, return_tensors="tf").pixel_values
|
899 |
+
>>> # create random boolean mask of shape (batch_size, num_patches)
|
900 |
+
>>> bool_masked_pos = tf.cast(tf.random.uniform((1, num_patches), minval=0, maxval=2, dtype=tf.int32), tf.bool)
|
901 |
+
|
902 |
+
>>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
|
903 |
+
>>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
|
904 |
+
>>> list(reconstructed_pixel_values.shape)
|
905 |
+
[1, 3, 224, 224]
|
906 |
+
```"""
|
907 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
908 |
+
|
909 |
+
outputs = self.deit(
|
910 |
+
pixel_values,
|
911 |
+
bool_masked_pos=bool_masked_pos,
|
912 |
+
head_mask=head_mask,
|
913 |
+
output_attentions=output_attentions,
|
914 |
+
output_hidden_states=output_hidden_states,
|
915 |
+
return_dict=return_dict,
|
916 |
+
training=training,
|
917 |
+
)
|
918 |
+
|
919 |
+
sequence_output = outputs[0]
|
920 |
+
|
921 |
+
# Reshape to (batch_size, num_channels, height, width)
|
922 |
+
sequence_output = sequence_output[:, 1:-1]
|
923 |
+
batch_size, sequence_length, num_channels = shape_list(sequence_output)
|
924 |
+
height = width = int(sequence_length**0.5)
|
925 |
+
sequence_output = tf.reshape(sequence_output, (batch_size, height, width, num_channels))
|
926 |
+
|
927 |
+
# Reconstruct pixel values
|
928 |
+
reconstructed_pixel_values = self.decoder(sequence_output, training=training)
|
929 |
+
# TF 2.0 image layers can't use NCHW format when running on CPU, so intermediate layers use NHWC,
|
930 |
+
# including the decoder. We transpose to compute the loss against the pixel values
|
931 |
+
# (batch_size, height, width, num_channels) -> (batch_size, num_channels, height, width)
|
932 |
+
reconstructed_pixel_values = tf.transpose(reconstructed_pixel_values, (0, 3, 1, 2))
|
933 |
+
|
934 |
+
masked_im_loss = None
|
935 |
+
if bool_masked_pos is not None:
|
936 |
+
size = self.config.image_size // self.config.patch_size
|
937 |
+
bool_masked_pos = tf.reshape(bool_masked_pos, (-1, size, size))
|
938 |
+
mask = tf.repeat(bool_masked_pos, self.config.patch_size, 1)
|
939 |
+
mask = tf.repeat(mask, self.config.patch_size, 2)
|
940 |
+
mask = tf.expand_dims(mask, 1)
|
941 |
+
mask = tf.cast(mask, tf.float32)
|
942 |
+
|
943 |
+
reconstruction_loss = keras.losses.mean_absolute_error(
|
944 |
+
# Swap axes as metric calculation reduces over the final dimension
|
945 |
+
tf.transpose(pixel_values, (1, 2, 3, 0)),
|
946 |
+
tf.transpose(reconstructed_pixel_values, (1, 2, 3, 0)),
|
947 |
+
)
|
948 |
+
reconstruction_loss = tf.expand_dims(reconstruction_loss, 0)
|
949 |
+
total_loss = tf.reduce_sum(reconstruction_loss * mask)
|
950 |
+
num_masked_pixels = (tf.reduce_sum(mask) + 1e-5) * self.config.num_channels
|
951 |
+
masked_im_loss = total_loss / num_masked_pixels
|
952 |
+
masked_im_loss = tf.reshape(masked_im_loss, (1,))
|
953 |
+
|
954 |
+
if not return_dict:
|
955 |
+
output = (reconstructed_pixel_values,) + outputs[1:]
|
956 |
+
return ((masked_im_loss,) + output) if masked_im_loss is not None else output
|
957 |
+
|
958 |
+
return TFMaskedImageModelingOutput(
|
959 |
+
loss=masked_im_loss,
|
960 |
+
reconstruction=reconstructed_pixel_values,
|
961 |
+
hidden_states=outputs.hidden_states,
|
962 |
+
attentions=outputs.attentions,
|
963 |
+
)
|
964 |
+
|
965 |
+
def build(self, input_shape=None):
|
966 |
+
if self.built:
|
967 |
+
return
|
968 |
+
self.built = True
|
969 |
+
if getattr(self, "deit", None) is not None:
|
970 |
+
with tf.name_scope(self.deit.name):
|
971 |
+
self.deit.build(None)
|
972 |
+
if getattr(self, "decoder", None) is not None:
|
973 |
+
with tf.name_scope(self.decoder.name):
|
974 |
+
self.decoder.build(None)
|
975 |
+
|
976 |
+
|
977 |
+
@add_start_docstrings(
|
978 |
+
"""
|
979 |
+
DeiT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
|
980 |
+
the [CLS] token) e.g. for ImageNet.
|
981 |
+
""",
|
982 |
+
DEIT_START_DOCSTRING,
|
983 |
+
)
|
984 |
+
class TFDeiTForImageClassification(TFDeiTPreTrainedModel, TFSequenceClassificationLoss):
|
985 |
+
def __init__(self, config: DeiTConfig):
|
986 |
+
super().__init__(config)
|
987 |
+
|
988 |
+
self.num_labels = config.num_labels
|
989 |
+
self.deit = TFDeiTMainLayer(config, add_pooling_layer=False, name="deit")
|
990 |
+
|
991 |
+
# Classifier head
|
992 |
+
self.classifier = (
|
993 |
+
keras.layers.Dense(config.num_labels, name="classifier")
|
994 |
+
if config.num_labels > 0
|
995 |
+
else keras.layers.Activation("linear", name="classifier")
|
996 |
+
)
|
997 |
+
self.config = config
|
998 |
+
|
999 |
+
@unpack_inputs
|
1000 |
+
@add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
|
1001 |
+
@replace_return_docstrings(output_type=TFImageClassifierOutput, config_class=_CONFIG_FOR_DOC)
|
1002 |
+
def call(
|
1003 |
+
self,
|
1004 |
+
pixel_values: tf.Tensor | None = None,
|
1005 |
+
head_mask: tf.Tensor | None = None,
|
1006 |
+
labels: tf.Tensor | None = None,
|
1007 |
+
output_attentions: Optional[bool] = None,
|
1008 |
+
output_hidden_states: Optional[bool] = None,
|
1009 |
+
return_dict: Optional[bool] = None,
|
1010 |
+
training: bool = False,
|
1011 |
+
) -> Union[tf.Tensor, TFImageClassifierOutput]:
|
1012 |
+
r"""
|
1013 |
+
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
|
1014 |
+
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
|
1015 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
1016 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
1017 |
+
|
1018 |
+
Returns:
|
1019 |
+
|
1020 |
+
Examples:
|
1021 |
+
|
1022 |
+
```python
|
1023 |
+
>>> from transformers import AutoImageProcessor, TFDeiTForImageClassification
|
1024 |
+
>>> import tensorflow as tf
|
1025 |
+
>>> from PIL import Image
|
1026 |
+
>>> import requests
|
1027 |
+
|
1028 |
+
>>> keras.utils.set_random_seed(3) # doctest: +IGNORE_RESULT
|
1029 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
1030 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
1031 |
+
|
1032 |
+
>>> # note: we are loading a TFDeiTForImageClassificationWithTeacher from the hub here,
|
1033 |
+
>>> # so the head will be randomly initialized, hence the predictions will be random
|
1034 |
+
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
|
1035 |
+
>>> model = TFDeiTForImageClassification.from_pretrained("facebook/deit-base-distilled-patch16-224")
|
1036 |
+
|
1037 |
+
>>> inputs = image_processor(images=image, return_tensors="tf")
|
1038 |
+
>>> outputs = model(**inputs)
|
1039 |
+
>>> logits = outputs.logits
|
1040 |
+
>>> # model predicts one of the 1000 ImageNet classes
|
1041 |
+
>>> predicted_class_idx = tf.math.argmax(logits, axis=-1)[0]
|
1042 |
+
>>> print("Predicted class:", model.config.id2label[int(predicted_class_idx)])
|
1043 |
+
Predicted class: little blue heron, Egretta caerulea
|
1044 |
+
```"""
|
1045 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1046 |
+
|
1047 |
+
outputs = self.deit(
|
1048 |
+
pixel_values,
|
1049 |
+
head_mask=head_mask,
|
1050 |
+
output_attentions=output_attentions,
|
1051 |
+
output_hidden_states=output_hidden_states,
|
1052 |
+
return_dict=return_dict,
|
1053 |
+
training=training,
|
1054 |
+
)
|
1055 |
+
|
1056 |
+
sequence_output = outputs[0]
|
1057 |
+
|
1058 |
+
logits = self.classifier(sequence_output[:, 0, :])
|
1059 |
+
# we don't use the distillation token
|
1060 |
+
|
1061 |
+
loss = None if labels is None else self.hf_compute_loss(labels, logits)
|
1062 |
+
|
1063 |
+
if not return_dict:
|
1064 |
+
output = (logits,) + outputs[1:]
|
1065 |
+
return ((loss,) + output) if loss is not None else output
|
1066 |
+
|
1067 |
+
return TFImageClassifierOutput(
|
1068 |
+
loss=loss,
|
1069 |
+
logits=logits,
|
1070 |
+
hidden_states=outputs.hidden_states,
|
1071 |
+
attentions=outputs.attentions,
|
1072 |
+
)
|
1073 |
+
|
1074 |
+
def build(self, input_shape=None):
|
1075 |
+
if self.built:
|
1076 |
+
return
|
1077 |
+
self.built = True
|
1078 |
+
if getattr(self, "deit", None) is not None:
|
1079 |
+
with tf.name_scope(self.deit.name):
|
1080 |
+
self.deit.build(None)
|
1081 |
+
if getattr(self, "classifier", None) is not None:
|
1082 |
+
with tf.name_scope(self.classifier.name):
|
1083 |
+
self.classifier.build([None, None, self.config.hidden_size])
|
1084 |
+
|
1085 |
+
|
1086 |
+
@add_start_docstrings(
|
1087 |
+
"""
|
1088 |
+
DeiT Model transformer with image classification heads on top (a linear layer on top of the final hidden state of
|
1089 |
+
the [CLS] token and a linear layer on top of the final hidden state of the distillation token) e.g. for ImageNet.
|
1090 |
+
|
1091 |
+
.. warning::
|
1092 |
+
|
1093 |
+
This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet
|
1094 |
+
supported.
|
1095 |
+
""",
|
1096 |
+
DEIT_START_DOCSTRING,
|
1097 |
+
)
|
1098 |
+
class TFDeiTForImageClassificationWithTeacher(TFDeiTPreTrainedModel):
|
1099 |
+
def __init__(self, config: DeiTConfig) -> None:
|
1100 |
+
super().__init__(config)
|
1101 |
+
|
1102 |
+
self.num_labels = config.num_labels
|
1103 |
+
self.deit = TFDeiTMainLayer(config, add_pooling_layer=False, name="deit")
|
1104 |
+
|
1105 |
+
# Classifier heads
|
1106 |
+
self.cls_classifier = (
|
1107 |
+
keras.layers.Dense(config.num_labels, name="cls_classifier")
|
1108 |
+
if config.num_labels > 0
|
1109 |
+
else keras.layers.Activation("linear", name="cls_classifier")
|
1110 |
+
)
|
1111 |
+
self.distillation_classifier = (
|
1112 |
+
keras.layers.Dense(config.num_labels, name="distillation_classifier")
|
1113 |
+
if config.num_labels > 0
|
1114 |
+
else keras.layers.Activation("linear", name="distillation_classifier")
|
1115 |
+
)
|
1116 |
+
self.config = config
|
1117 |
+
|
1118 |
+
@unpack_inputs
|
1119 |
+
@add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
|
1120 |
+
@add_code_sample_docstrings(
|
1121 |
+
checkpoint=_IMAGE_CLASS_CHECKPOINT,
|
1122 |
+
output_type=TFDeiTForImageClassificationWithTeacherOutput,
|
1123 |
+
config_class=_CONFIG_FOR_DOC,
|
1124 |
+
expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
|
1125 |
+
)
|
1126 |
+
def call(
|
1127 |
+
self,
|
1128 |
+
pixel_values: tf.Tensor | None = None,
|
1129 |
+
head_mask: tf.Tensor | None = None,
|
1130 |
+
output_attentions: Optional[bool] = None,
|
1131 |
+
output_hidden_states: Optional[bool] = None,
|
1132 |
+
return_dict: Optional[bool] = None,
|
1133 |
+
training: bool = False,
|
1134 |
+
) -> Union[tuple, TFDeiTForImageClassificationWithTeacherOutput]:
|
1135 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1136 |
+
|
1137 |
+
outputs = self.deit(
|
1138 |
+
pixel_values,
|
1139 |
+
head_mask=head_mask,
|
1140 |
+
output_attentions=output_attentions,
|
1141 |
+
output_hidden_states=output_hidden_states,
|
1142 |
+
return_dict=return_dict,
|
1143 |
+
training=training,
|
1144 |
+
)
|
1145 |
+
|
1146 |
+
sequence_output = outputs[0]
|
1147 |
+
|
1148 |
+
cls_logits = self.cls_classifier(sequence_output[:, 0, :])
|
1149 |
+
distillation_logits = self.distillation_classifier(sequence_output[:, 1, :])
|
1150 |
+
|
1151 |
+
# during inference, return the average of both classifier predictions
|
1152 |
+
logits = (cls_logits + distillation_logits) / 2
|
1153 |
+
|
1154 |
+
if not return_dict:
|
1155 |
+
output = (logits, cls_logits, distillation_logits) + outputs[1:]
|
1156 |
+
return output
|
1157 |
+
|
1158 |
+
return TFDeiTForImageClassificationWithTeacherOutput(
|
1159 |
+
logits=logits,
|
1160 |
+
cls_logits=cls_logits,
|
1161 |
+
distillation_logits=distillation_logits,
|
1162 |
+
hidden_states=outputs.hidden_states,
|
1163 |
+
attentions=outputs.attentions,
|
1164 |
+
)
|
1165 |
+
|
1166 |
+
def build(self, input_shape=None):
|
1167 |
+
if self.built:
|
1168 |
+
return
|
1169 |
+
self.built = True
|
1170 |
+
if getattr(self, "deit", None) is not None:
|
1171 |
+
with tf.name_scope(self.deit.name):
|
1172 |
+
self.deit.build(None)
|
1173 |
+
if getattr(self, "cls_classifier", None) is not None:
|
1174 |
+
with tf.name_scope(self.cls_classifier.name):
|
1175 |
+
self.cls_classifier.build([None, None, self.config.hidden_size])
|
1176 |
+
if getattr(self, "distillation_classifier", None) is not None:
|
1177 |
+
with tf.name_scope(self.distillation_classifier.name):
|
1178 |
+
self.distillation_classifier.build([None, None, self.config.hidden_size])
|
llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (995 Bytes).

llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/configuration_dinov2.cpython-310.pyc
ADDED
Binary file (7.66 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/convert_dinov2_to_hf.cpython-310.pyc
ADDED
Binary file (7.97 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/modeling_dinov2.cpython-310.pyc
ADDED
Binary file (27.5 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/configuration_dinov2.py
ADDED
@@ -0,0 +1,175 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" DINOv2 model configuration"""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
|
32 |
+
|
33 |
+
|
34 |
+
class Dinov2Config(BackboneConfigMixin, PretrainedConfig):
|
35 |
+
r"""
|
36 |
+
This is the configuration class to store the configuration of a [`Dinov2Model`]. It is used to instantiate an
|
37 |
+
Dinov2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
|
38 |
+
with the defaults will yield a similar configuration to that of the Dinov2
|
39 |
+
[google/dinov2-base-patch16-224](https://huggingface.co/google/dinov2-base-patch16-224) architecture.
|
40 |
+
|
41 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
42 |
+
documentation from [`PretrainedConfig`] for more information.
|
43 |
+
|
44 |
+
Args:
|
45 |
+
hidden_size (`int`, *optional*, defaults to 768):
|
46 |
+
Dimensionality of the encoder layers and the pooler layer.
|
47 |
+
num_hidden_layers (`int`, *optional*, defaults to 12):
|
48 |
+
Number of hidden layers in the Transformer encoder.
|
49 |
+
num_attention_heads (`int`, *optional*, defaults to 12):
|
50 |
+
Number of attention heads for each attention layer in the Transformer encoder.
|
51 |
+
mlp_ratio (`int`, *optional*, defaults to 4):
|
52 |
+
Ratio of the hidden size of the MLPs relative to the `hidden_size`.
|
53 |
+
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
|
54 |
+
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
|
55 |
+
`"relu"`, `"selu"` and `"gelu_new"` are supported.
|
56 |
+
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
|
57 |
+
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
|
58 |
+
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
|
59 |
+
The dropout ratio for the attention probabilities.
|
60 |
+
initializer_range (`float`, *optional*, defaults to 0.02):
|
61 |
+
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
62 |
+
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
|
63 |
+
The epsilon used by the layer normalization layers.
|
64 |
+
image_size (`int`, *optional*, defaults to 224):
|
65 |
+
The size (resolution) of each image.
|
66 |
+
patch_size (`int`, *optional*, defaults to 16):
|
67 |
+
The size (resolution) of each patch.
|
68 |
+
num_channels (`int`, *optional*, defaults to 3):
|
69 |
+
The number of input channels.
|
70 |
+
qkv_bias (`bool`, *optional*, defaults to `True`):
|
71 |
+
Whether to add a bias to the queries, keys and values.
|
72 |
+
layerscale_value (`float`, *optional*, defaults to 1.0):
|
73 |
+
Initial value to use for layer scale.
|
74 |
+
drop_path_rate (`float`, *optional*, defaults to 0.0):
|
75 |
+
Stochastic depth rate per sample (when applied in the main path of residual layers).
|
76 |
+
use_swiglu_ffn (`bool`, *optional*, defaults to `False`):
|
77 |
+
Whether to use the SwiGLU feedforward neural network.
|
78 |
+
out_features (`List[str]`, *optional*):
|
79 |
+
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
|
80 |
+
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
|
81 |
+
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
|
82 |
+
same order as defined in the `stage_names` attribute.
|
83 |
+
out_indices (`List[int]`, *optional*):
|
84 |
+
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
|
85 |
+
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
|
86 |
+
If unset and `out_features` is unset, will default to the last stage. Must be in the
|
87 |
+
same order as defined in the `stage_names` attribute.
|
88 |
+
apply_layernorm (`bool`, *optional*, defaults to `True`):
|
89 |
+
Whether to apply layer normalization to the feature maps in case the model is used as backbone.
|
90 |
+
reshape_hidden_states (`bool`, *optional*, defaults to `True`):
|
91 |
+
Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
|
92 |
+
case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
|
93 |
+
seq_len, hidden_size)`.
|
94 |
+
|
95 |
+
Example:
|
96 |
+
|
97 |
+
```python
|
98 |
+
>>> from transformers import Dinov2Config, Dinov2Model
|
99 |
+
|
100 |
+
>>> # Initializing a Dinov2 dinov2-base-patch16-224 style configuration
|
101 |
+
>>> configuration = Dinov2Config()
|
102 |
+
|
103 |
+
>>> # Initializing a model (with random weights) from the dinov2-base-patch16-224 style configuration
|
104 |
+
>>> model = Dinov2Model(configuration)
|
105 |
+
|
106 |
+
>>> # Accessing the model configuration
|
107 |
+
>>> configuration = model.config
|
108 |
+
```"""
|
109 |
+
|
110 |
+
model_type = "dinov2"
|
111 |
+
|
112 |
+
def __init__(
|
113 |
+
self,
|
114 |
+
hidden_size=768,
|
115 |
+
num_hidden_layers=12,
|
116 |
+
num_attention_heads=12,
|
117 |
+
mlp_ratio=4,
|
118 |
+
hidden_act="gelu",
|
119 |
+
hidden_dropout_prob=0.0,
|
120 |
+
attention_probs_dropout_prob=0.0,
|
121 |
+
initializer_range=0.02,
|
122 |
+
layer_norm_eps=1e-6,
|
123 |
+
image_size=224,
|
124 |
+
patch_size=16,
|
125 |
+
num_channels=3,
|
126 |
+
qkv_bias=True,
|
127 |
+
layerscale_value=1.0,
|
128 |
+
drop_path_rate=0.0,
|
129 |
+
use_swiglu_ffn=False,
|
130 |
+
out_features=None,
|
131 |
+
out_indices=None,
|
132 |
+
apply_layernorm=True,
|
133 |
+
reshape_hidden_states=True,
|
134 |
+
**kwargs,
|
135 |
+
):
|
136 |
+
super().__init__(**kwargs)
|
137 |
+
|
138 |
+
self.hidden_size = hidden_size
|
139 |
+
self.num_hidden_layers = num_hidden_layers
|
140 |
+
self.num_attention_heads = num_attention_heads
|
141 |
+
self.mlp_ratio = mlp_ratio
|
142 |
+
self.hidden_act = hidden_act
|
143 |
+
self.hidden_dropout_prob = hidden_dropout_prob
|
144 |
+
self.attention_probs_dropout_prob = attention_probs_dropout_prob
|
145 |
+
self.initializer_range = initializer_range
|
146 |
+
self.layer_norm_eps = layer_norm_eps
|
147 |
+
self.image_size = image_size
|
148 |
+
self.patch_size = patch_size
|
149 |
+
self.num_channels = num_channels
|
150 |
+
self.qkv_bias = qkv_bias
|
151 |
+
self.layerscale_value = layerscale_value
|
152 |
+
self.drop_path_rate = drop_path_rate
|
153 |
+
self.use_swiglu_ffn = use_swiglu_ffn
|
154 |
+
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_hidden_layers + 1)]
|
155 |
+
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
|
156 |
+
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
|
157 |
+
)
|
158 |
+
self.apply_layernorm = apply_layernorm
|
159 |
+
self.reshape_hidden_states = reshape_hidden_states
|
160 |
+
|
161 |
+
|
162 |
+
class Dinov2OnnxConfig(OnnxConfig):
|
163 |
+
torch_onnx_minimum_version = version.parse("1.11")
|
164 |
+
|
165 |
+
@property
|
166 |
+
def inputs(self) -> Mapping[str, Mapping[int, str]]:
|
167 |
+
return OrderedDict(
|
168 |
+
[
|
169 |
+
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
|
170 |
+
]
|
171 |
+
)
|
172 |
+
|
173 |
+
@property
|
174 |
+
def atol_for_validation(self) -> float:
|
175 |
+
return 1e-4
|
llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/modeling_dinov2.py
ADDED
@@ -0,0 +1,856 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2023 Meta AI and The HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
""" PyTorch DINOv2 model."""
|
16 |
+
|
17 |
+
|
18 |
+
import collections.abc
|
19 |
+
import math
|
20 |
+
from typing import Dict, List, Optional, Set, Tuple, Union
|
21 |
+
|
22 |
+
import torch
|
23 |
+
import torch.utils.checkpoint
|
24 |
+
from torch import nn
|
25 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
26 |
+
|
27 |
+
from ...activations import ACT2FN
|
28 |
+
from ...modeling_outputs import (
|
29 |
+
BackboneOutput,
|
30 |
+
BaseModelOutput,
|
31 |
+
BaseModelOutputWithPooling,
|
32 |
+
ImageClassifierOutput,
|
33 |
+
)
|
34 |
+
from ...modeling_utils import PreTrainedModel
|
35 |
+
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
|
36 |
+
from ...utils import (
|
37 |
+
add_code_sample_docstrings,
|
38 |
+
add_start_docstrings,
|
39 |
+
add_start_docstrings_to_model_forward,
|
40 |
+
logging,
|
41 |
+
replace_return_docstrings,
|
42 |
+
)
|
43 |
+
from ...utils.backbone_utils import BackboneMixin
|
44 |
+
from .configuration_dinov2 import Dinov2Config
|
45 |
+
|
46 |
+
|
47 |
+
logger = logging.get_logger(__name__)
|
48 |
+
|
49 |
+
# General docstring
|
50 |
+
_CONFIG_FOR_DOC = "Dinov2Config"
|
51 |
+
|
52 |
+
# Base docstring
|
53 |
+
_CHECKPOINT_FOR_DOC = "facebook/dinov2-base"
|
54 |
+
_EXPECTED_OUTPUT_SHAPE = [1, 257, 768]
|
55 |
+
|
56 |
+
# Image classification docstring
|
57 |
+
_IMAGE_CLASS_CHECKPOINT = "facebook/dinov2-small-imagenet1k-1-layer"
|
58 |
+
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
|
59 |
+
|
60 |
+
|
61 |
+
from ..deprecated._archive_maps import DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
|
62 |
+
|
63 |
+
|
64 |
+
class Dinov2Embeddings(nn.Module):
|
65 |
+
"""
|
66 |
+
Construct the CLS token, mask token, position and patch embeddings.
|
67 |
+
"""
|
68 |
+
|
69 |
+
def __init__(self, config: Dinov2Config) -> None:
|
70 |
+
super().__init__()
|
71 |
+
|
72 |
+
self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
|
73 |
+
self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size))
|
74 |
+
self.patch_embeddings = Dinov2PatchEmbeddings(config)
|
75 |
+
num_patches = self.patch_embeddings.num_patches
|
76 |
+
self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
|
77 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
78 |
+
self.config = config
|
79 |
+
|
80 |
+
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
|
81 |
+
"""
|
82 |
+
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
|
83 |
+
resolution images.
|
84 |
+
|
85 |
+
Source:
|
86 |
+
https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
|
87 |
+
"""
|
88 |
+
|
89 |
+
num_patches = embeddings.shape[1] - 1
|
90 |
+
num_positions = self.position_embeddings.shape[1] - 1
|
91 |
+
if num_patches == num_positions and height == width:
|
92 |
+
return self.position_embeddings
|
93 |
+
class_pos_embed = self.position_embeddings[:, 0]
|
94 |
+
patch_pos_embed = self.position_embeddings[:, 1:]
|
95 |
+
dim = embeddings.shape[-1]
|
96 |
+
height = height // self.config.patch_size
|
97 |
+
width = width // self.config.patch_size
|
98 |
+
# we add a small number to avoid floating point error in the interpolation
|
99 |
+
# see discussion at https://github.com/facebookresearch/dino/issues/8
|
100 |
+
height, width = height + 0.1, width + 0.1
|
101 |
+
patch_pos_embed = patch_pos_embed.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim)
|
102 |
+
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
|
103 |
+
target_dtype = patch_pos_embed.dtype
|
104 |
+
patch_pos_embed = nn.functional.interpolate(
|
105 |
+
patch_pos_embed.to(dtype=torch.float32),
|
106 |
+
scale_factor=(float(height / math.sqrt(num_positions)), float(width / math.sqrt(num_positions))),
|
107 |
+
mode="bicubic",
|
108 |
+
align_corners=False,
|
109 |
+
).to(dtype=target_dtype)
|
110 |
+
if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]:
|
111 |
+
raise ValueError("Width or height does not match with the interpolated position embeddings")
|
112 |
+
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
|
113 |
+
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
|
114 |
+
|
115 |
+
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor:
|
116 |
+
batch_size, _, height, width = pixel_values.shape
|
117 |
+
target_dtype = self.patch_embeddings.projection.weight.dtype
|
118 |
+
embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
|
119 |
+
|
120 |
+
if bool_masked_pos is not None:
|
121 |
+
embeddings = torch.where(
|
122 |
+
bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings
|
123 |
+
)
|
124 |
+
|
125 |
+
# add the [CLS] token to the embedded patch tokens
|
126 |
+
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
|
127 |
+
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
|
128 |
+
|
129 |
+
# add positional encoding to each token
|
130 |
+
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
|
131 |
+
|
132 |
+
embeddings = self.dropout(embeddings)
|
133 |
+
|
134 |
+
return embeddings
|
135 |
+
|
136 |
+
|
137 |
+
class Dinov2PatchEmbeddings(nn.Module):
|
138 |
+
"""
|
139 |
+
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
|
140 |
+
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
|
141 |
+
Transformer.
|
142 |
+
"""
|
143 |
+
|
144 |
+
def __init__(self, config):
|
145 |
+
super().__init__()
|
146 |
+
image_size, patch_size = config.image_size, config.patch_size
|
147 |
+
num_channels, hidden_size = config.num_channels, config.hidden_size
|
148 |
+
|
149 |
+
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
|
150 |
+
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
|
151 |
+
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
|
152 |
+
self.image_size = image_size
|
153 |
+
self.patch_size = patch_size
|
154 |
+
self.num_channels = num_channels
|
155 |
+
self.num_patches = num_patches
|
156 |
+
|
157 |
+
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
|
158 |
+
|
159 |
+
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
|
160 |
+
num_channels = pixel_values.shape[1]
|
161 |
+
if num_channels != self.num_channels:
|
162 |
+
raise ValueError(
|
163 |
+
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
|
164 |
+
f" Expected {self.num_channels} but got {num_channels}."
|
165 |
+
)
|
166 |
+
embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
|
167 |
+
return embeddings
|
168 |
+
|
169 |
+
|
170 |
+
# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->Dinov2
|
171 |
+
class Dinov2SelfAttention(nn.Module):
|
172 |
+
def __init__(self, config: Dinov2Config) -> None:
|
173 |
+
super().__init__()
|
174 |
+
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
|
175 |
+
raise ValueError(
|
176 |
+
f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
|
177 |
+
f"heads {config.num_attention_heads}."
|
178 |
+
)
|
179 |
+
|
180 |
+
self.num_attention_heads = config.num_attention_heads
|
181 |
+
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
|
182 |
+
self.all_head_size = self.num_attention_heads * self.attention_head_size
|
183 |
+
|
184 |
+
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
|
185 |
+
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
|
186 |
+
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
|
187 |
+
|
188 |
+
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
|
189 |
+
|
190 |
+
def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
|
191 |
+
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
|
192 |
+
x = x.view(new_x_shape)
|
193 |
+
return x.permute(0, 2, 1, 3)
|
194 |
+
|
195 |
+
def forward(
|
196 |
+
self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
|
197 |
+
) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
|
198 |
+
mixed_query_layer = self.query(hidden_states)
|
199 |
+
|
200 |
+
key_layer = self.transpose_for_scores(self.key(hidden_states))
|
201 |
+
value_layer = self.transpose_for_scores(self.value(hidden_states))
|
202 |
+
query_layer = self.transpose_for_scores(mixed_query_layer)
|
203 |
+
|
204 |
+
# Take the dot product between "query" and "key" to get the raw attention scores.
|
205 |
+
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
|
206 |
+
|
207 |
+
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
|
208 |
+
|
209 |
+
# Normalize the attention scores to probabilities.
|
210 |
+
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
|
211 |
+
|
212 |
+
# This is actually dropping out entire tokens to attend to, which might
|
213 |
+
# seem a bit unusual, but is taken from the original Transformer paper.
|
214 |
+
attention_probs = self.dropout(attention_probs)
|
215 |
+
|
216 |
+
# Mask heads if we want to
|
217 |
+
if head_mask is not None:
|
218 |
+
attention_probs = attention_probs * head_mask
|
219 |
+
|
220 |
+
context_layer = torch.matmul(attention_probs, value_layer)
|
221 |
+
|
222 |
+
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
|
223 |
+
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
|
224 |
+
context_layer = context_layer.view(new_context_layer_shape)
|
225 |
+
|
226 |
+
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
|
227 |
+
|
228 |
+
return outputs
|
229 |
+
|
230 |
+
|
231 |
+
# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->Dinov2
|
232 |
+
class Dinov2SelfOutput(nn.Module):
|
233 |
+
"""
|
234 |
+
The residual connection is defined in Dinov2Layer instead of here (as is the case with other models), due to the
|
235 |
+
layernorm applied before each block.
|
236 |
+
"""
|
237 |
+
|
238 |
+
def __init__(self, config: Dinov2Config) -> None:
|
239 |
+
super().__init__()
|
240 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
241 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
242 |
+
|
243 |
+
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
|
244 |
+
hidden_states = self.dense(hidden_states)
|
245 |
+
hidden_states = self.dropout(hidden_states)
|
246 |
+
|
247 |
+
return hidden_states
|
248 |
+
|
249 |
+
|
250 |
+
# Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->Dinov2
|
251 |
+
class Dinov2Attention(nn.Module):
|
252 |
+
def __init__(self, config: Dinov2Config) -> None:
|
253 |
+
super().__init__()
|
254 |
+
self.attention = Dinov2SelfAttention(config)
|
255 |
+
self.output = Dinov2SelfOutput(config)
|
256 |
+
self.pruned_heads = set()
|
257 |
+
|
258 |
+
def prune_heads(self, heads: Set[int]) -> None:
|
259 |
+
if len(heads) == 0:
|
260 |
+
return
|
261 |
+
heads, index = find_pruneable_heads_and_indices(
|
262 |
+
heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
|
263 |
+
)
|
264 |
+
|
265 |
+
# Prune linear layers
|
266 |
+
self.attention.query = prune_linear_layer(self.attention.query, index)
|
267 |
+
self.attention.key = prune_linear_layer(self.attention.key, index)
|
268 |
+
self.attention.value = prune_linear_layer(self.attention.value, index)
|
269 |
+
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
|
270 |
+
|
271 |
+
# Update hyper params and store pruned heads
|
272 |
+
self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
|
273 |
+
self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
|
274 |
+
self.pruned_heads = self.pruned_heads.union(heads)
|
275 |
+
|
276 |
+
def forward(
|
277 |
+
self,
|
278 |
+
hidden_states: torch.Tensor,
|
279 |
+
head_mask: Optional[torch.Tensor] = None,
|
280 |
+
output_attentions: bool = False,
|
281 |
+
) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
|
282 |
+
self_outputs = self.attention(hidden_states, head_mask, output_attentions)
|
283 |
+
|
284 |
+
attention_output = self.output(self_outputs[0], hidden_states)
|
285 |
+
|
286 |
+
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
|
287 |
+
return outputs
|
288 |
+
|
289 |
+
|
290 |
+
class Dinov2LayerScale(nn.Module):
|
291 |
+
def __init__(self, config) -> None:
|
292 |
+
super().__init__()
|
293 |
+
self.lambda1 = nn.Parameter(config.layerscale_value * torch.ones(config.hidden_size))
|
294 |
+
|
295 |
+
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
|
296 |
+
return hidden_state * self.lambda1
|
297 |
+
|
298 |
+
|
299 |
+
# Copied from transformers.models.beit.modeling_beit.drop_path
|
300 |
+
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
|
301 |
+
"""
|
302 |
+
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
|
303 |
+
|
304 |
+
Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
|
305 |
+
however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
|
306 |
+
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
|
307 |
+
layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
|
308 |
+
argument.
|
309 |
+
"""
|
310 |
+
if drop_prob == 0.0 or not training:
|
311 |
+
return input
|
312 |
+
keep_prob = 1 - drop_prob
|
313 |
+
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
|
314 |
+
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
|
315 |
+
random_tensor.floor_() # binarize
|
316 |
+
output = input.div(keep_prob) * random_tensor
|
317 |
+
return output
|
318 |
+
|
319 |
+
|
320 |
+
# Copied from transformers.models.beit.modeling_beit.BeitDropPath
|
321 |
+
class Dinov2DropPath(nn.Module):
|
322 |
+
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
|
323 |
+
|
324 |
+
def __init__(self, drop_prob: Optional[float] = None) -> None:
|
325 |
+
super().__init__()
|
326 |
+
self.drop_prob = drop_prob
|
327 |
+
|
328 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
329 |
+
return drop_path(hidden_states, self.drop_prob, self.training)
|
330 |
+
|
331 |
+
def extra_repr(self) -> str:
|
332 |
+
return "p={}".format(self.drop_prob)
|
333 |
+
|
334 |
+
|
335 |
+
class Dinov2MLP(nn.Module):
|
336 |
+
def __init__(self, config) -> None:
|
337 |
+
super().__init__()
|
338 |
+
in_features = out_features = config.hidden_size
|
339 |
+
hidden_features = int(config.hidden_size * config.mlp_ratio)
|
340 |
+
self.fc1 = nn.Linear(in_features, hidden_features, bias=True)
|
341 |
+
if isinstance(config.hidden_act, str):
|
342 |
+
self.activation = ACT2FN[config.hidden_act]
|
343 |
+
else:
|
344 |
+
self.activation = config.hidden_act
|
345 |
+
self.fc2 = nn.Linear(hidden_features, out_features, bias=True)
|
346 |
+
|
347 |
+
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
|
348 |
+
hidden_state = self.fc1(hidden_state)
|
349 |
+
hidden_state = self.activation(hidden_state)
|
350 |
+
hidden_state = self.fc2(hidden_state)
|
351 |
+
return hidden_state
|
352 |
+
|
353 |
+
|
354 |
+
class Dinov2SwiGLUFFN(nn.Module):
|
355 |
+
def __init__(self, config) -> None:
|
356 |
+
super().__init__()
|
357 |
+
in_features = out_features = config.hidden_size
|
358 |
+
hidden_features = int(config.hidden_size * config.mlp_ratio)
|
359 |
+
hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
|
360 |
+
|
361 |
+
self.weights_in = nn.Linear(in_features, 2 * hidden_features, bias=True)
|
362 |
+
self.weights_out = nn.Linear(hidden_features, out_features, bias=True)
|
363 |
+
|
364 |
+
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
|
365 |
+
hidden_state = self.weights_in(hidden_state)
|
366 |
+
x1, x2 = hidden_state.chunk(2, dim=-1)
|
367 |
+
hidden = nn.functional.silu(x1) * x2
|
368 |
+
return self.weights_out(hidden)
|
369 |
+
|
370 |
+
|
371 |
+
class Dinov2Layer(nn.Module):
|
372 |
+
"""This corresponds to the Block class in the original implementation."""
|
373 |
+
|
374 |
+
def __init__(self, config: Dinov2Config) -> None:
|
375 |
+
super().__init__()
|
376 |
+
|
377 |
+
self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
378 |
+
self.attention = Dinov2Attention(config)
|
379 |
+
self.layer_scale1 = Dinov2LayerScale(config)
|
380 |
+
self.drop_path = Dinov2DropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
|
381 |
+
|
382 |
+
self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
383 |
+
|
384 |
+
if config.use_swiglu_ffn:
|
385 |
+
self.mlp = Dinov2SwiGLUFFN(config)
|
386 |
+
else:
|
387 |
+
self.mlp = Dinov2MLP(config)
|
388 |
+
self.layer_scale2 = Dinov2LayerScale(config)
|
389 |
+
|
390 |
+
def forward(
|
391 |
+
self,
|
392 |
+
hidden_states: torch.Tensor,
|
393 |
+
head_mask: Optional[torch.Tensor] = None,
|
394 |
+
output_attentions: bool = False,
|
395 |
+
) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
|
396 |
+
self_attention_outputs = self.attention(
|
397 |
+
self.norm1(hidden_states), # in Dinov2, layernorm is applied before self-attention
|
398 |
+
head_mask,
|
399 |
+
output_attentions=output_attentions,
|
400 |
+
)
|
401 |
+
attention_output = self_attention_outputs[0]
|
402 |
+
|
403 |
+
attention_output = self.layer_scale1(attention_output)
|
404 |
+
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
|
405 |
+
|
406 |
+
# first residual connection
|
407 |
+
hidden_states = self.drop_path(attention_output) + hidden_states
|
408 |
+
|
409 |
+
# in Dinov2, layernorm is also applied after self-attention
|
410 |
+
layer_output = self.norm2(hidden_states)
|
411 |
+
layer_output = self.mlp(layer_output)
|
412 |
+
layer_output = self.layer_scale2(layer_output)
|
413 |
+
|
414 |
+
# second residual connection
|
415 |
+
layer_output = self.drop_path(layer_output) + hidden_states
|
416 |
+
|
417 |
+
outputs = (layer_output,) + outputs
|
418 |
+
|
419 |
+
return outputs
|
420 |
+
|
421 |
+
|
422 |
+
# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->Dinov2
|
423 |
+
class Dinov2Encoder(nn.Module):
|
424 |
+
def __init__(self, config: Dinov2Config) -> None:
|
425 |
+
super().__init__()
|
426 |
+
self.config = config
|
427 |
+
self.layer = nn.ModuleList([Dinov2Layer(config) for _ in range(config.num_hidden_layers)])
|
428 |
+
self.gradient_checkpointing = False
|
429 |
+
|
430 |
+
def forward(
|
431 |
+
self,
|
432 |
+
hidden_states: torch.Tensor,
|
433 |
+
head_mask: Optional[torch.Tensor] = None,
|
434 |
+
output_attentions: bool = False,
|
435 |
+
output_hidden_states: bool = False,
|
436 |
+
return_dict: bool = True,
|
437 |
+
) -> Union[tuple, BaseModelOutput]:
|
438 |
+
all_hidden_states = () if output_hidden_states else None
|
439 |
+
all_self_attentions = () if output_attentions else None
|
440 |
+
|
441 |
+
for i, layer_module in enumerate(self.layer):
|
442 |
+
if output_hidden_states:
|
443 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
444 |
+
|
445 |
+
layer_head_mask = head_mask[i] if head_mask is not None else None
|
446 |
+
|
447 |
+
if self.gradient_checkpointing and self.training:
|
448 |
+
layer_outputs = self._gradient_checkpointing_func(
|
449 |
+
layer_module.__call__,
|
450 |
+
hidden_states,
|
451 |
+
layer_head_mask,
|
452 |
+
output_attentions,
|
453 |
+
)
|
454 |
+
else:
|
455 |
+
layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)
|
456 |
+
|
457 |
+
hidden_states = layer_outputs[0]
|
458 |
+
|
459 |
+
if output_attentions:
|
460 |
+
all_self_attentions = all_self_attentions + (layer_outputs[1],)
|
461 |
+
|
462 |
+
if output_hidden_states:
|
463 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
464 |
+
|
465 |
+
if not return_dict:
|
466 |
+
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
|
467 |
+
return BaseModelOutput(
|
468 |
+
last_hidden_state=hidden_states,
|
469 |
+
hidden_states=all_hidden_states,
|
470 |
+
attentions=all_self_attentions,
|
471 |
+
)
|
472 |
+
|
473 |
+
|
474 |
+
class Dinov2PreTrainedModel(PreTrainedModel):
|
475 |
+
"""
|
476 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
477 |
+
models.
|
478 |
+
"""
|
479 |
+
|
480 |
+
config_class = Dinov2Config
|
481 |
+
base_model_prefix = "dinov2"
|
482 |
+
main_input_name = "pixel_values"
|
483 |
+
supports_gradient_checkpointing = True
|
484 |
+
|
485 |
+
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
|
486 |
+
"""Initialize the weights"""
|
487 |
+
if isinstance(module, (nn.Linear, nn.Conv2d)):
|
488 |
+
# Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
|
489 |
+
# `trunc_normal_cpu` not implemented in `half` issues
|
490 |
+
module.weight.data = nn.init.trunc_normal_(
|
491 |
+
module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
|
492 |
+
).to(module.weight.dtype)
|
493 |
+
if module.bias is not None:
|
494 |
+
module.bias.data.zero_()
|
495 |
+
elif isinstance(module, nn.LayerNorm):
|
496 |
+
module.bias.data.zero_()
|
497 |
+
module.weight.data.fill_(1.0)
|
498 |
+
elif isinstance(module, Dinov2Embeddings):
|
499 |
+
module.position_embeddings.data = nn.init.trunc_normal_(
|
500 |
+
module.position_embeddings.data.to(torch.float32),
|
501 |
+
mean=0.0,
|
502 |
+
std=self.config.initializer_range,
|
503 |
+
).to(module.position_embeddings.dtype)
|
504 |
+
|
505 |
+
module.cls_token.data = nn.init.trunc_normal_(
|
506 |
+
module.cls_token.data.to(torch.float32),
|
507 |
+
mean=0.0,
|
508 |
+
std=self.config.initializer_range,
|
509 |
+
).to(module.cls_token.dtype)
|
510 |
+
|
511 |
+
|
512 |
+
DINOV2_START_DOCSTRING = r"""
|
513 |
+
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
|
514 |
+
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
|
515 |
+
behavior.
|
516 |
+
|
517 |
+
Parameters:
|
518 |
+
config ([`Dinov2Config`]): Model configuration class with all the parameters of the model.
|
519 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
520 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
521 |
+
"""
|
522 |
+
|
523 |
+
DINOV2_BASE_INPUTS_DOCSTRING = r"""
|
524 |
+
Args:
|
525 |
+
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
|
526 |
+
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
|
527 |
+
[`BitImageProcessor.preprocess`] for details.
|
528 |
+
|
529 |
+
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
|
530 |
+
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for
|
531 |
+
pre-training.
|
532 |
+
|
533 |
+
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
|
534 |
+
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
|
535 |
+
|
536 |
+
- 1 indicates the head is **not masked**,
|
537 |
+
- 0 indicates the head is **masked**.
|
538 |
+
|
539 |
+
output_attentions (`bool`, *optional*):
|
540 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
541 |
+
tensors for more detail.
|
542 |
+
output_hidden_states (`bool`, *optional*):
|
543 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
544 |
+
more detail.
|
545 |
+
return_dict (`bool`, *optional*):
|
546 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
547 |
+
"""
|
548 |
+
|
549 |
+
DINOV2_INPUTS_DOCSTRING = r"""
|
550 |
+
Args:
|
551 |
+
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
|
552 |
+
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
|
553 |
+
[`BitImageProcessor.preprocess`] for details.
|
554 |
+
|
555 |
+
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
|
556 |
+
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
|
557 |
+
|
558 |
+
- 1 indicates the head is **not masked**,
|
559 |
+
- 0 indicates the head is **masked**.
|
560 |
+
|
561 |
+
output_attentions (`bool`, *optional*):
|
562 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
563 |
+
tensors for more detail.
|
564 |
+
output_hidden_states (`bool`, *optional*):
|
565 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
566 |
+
more detail.
|
567 |
+
return_dict (`bool`, *optional*):
|
568 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
569 |
+
"""
|
570 |
+
|
571 |
+
|
572 |
+
@add_start_docstrings(
|
573 |
+
"The bare DINOv2 Model transformer outputting raw hidden-states without any specific head on top.",
|
574 |
+
DINOV2_START_DOCSTRING,
|
575 |
+
)
|
576 |
+
class Dinov2Model(Dinov2PreTrainedModel):
|
577 |
+
def __init__(self, config: Dinov2Config):
|
578 |
+
super().__init__(config)
|
579 |
+
self.config = config
|
580 |
+
|
581 |
+
self.embeddings = Dinov2Embeddings(config)
|
582 |
+
self.encoder = Dinov2Encoder(config)
|
583 |
+
|
584 |
+
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
585 |
+
|
586 |
+
# Initialize weights and apply final processing
|
587 |
+
self.post_init()
|
588 |
+
|
589 |
+
def get_input_embeddings(self) -> Dinov2PatchEmbeddings:
|
590 |
+
return self.embeddings.patch_embeddings
|
591 |
+
|
592 |
+
def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
|
593 |
+
"""
|
594 |
+
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
|
595 |
+
class PreTrainedModel
|
596 |
+
"""
|
597 |
+
for layer, heads in heads_to_prune.items():
|
598 |
+
self.encoder.layer[layer].attention.prune_heads(heads)
|
599 |
+
|
600 |
+
@add_start_docstrings_to_model_forward(DINOV2_BASE_INPUTS_DOCSTRING)
|
601 |
+
@add_code_sample_docstrings(
|
602 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
603 |
+
output_type=BaseModelOutputWithPooling,
|
604 |
+
config_class=_CONFIG_FOR_DOC,
|
605 |
+
modality="vision",
|
606 |
+
expected_output=_EXPECTED_OUTPUT_SHAPE,
|
607 |
+
)
|
608 |
+
def forward(
|
609 |
+
self,
|
610 |
+
pixel_values: Optional[torch.Tensor] = None,
|
611 |
+
bool_masked_pos: Optional[torch.Tensor] = None,
|
612 |
+
head_mask: Optional[torch.Tensor] = None,
|
613 |
+
output_attentions: Optional[bool] = None,
|
614 |
+
output_hidden_states: Optional[bool] = None,
|
615 |
+
return_dict: Optional[bool] = None,
|
616 |
+
) -> Union[Tuple, BaseModelOutputWithPooling]:
|
617 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
618 |
+
output_hidden_states = (
|
619 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
620 |
+
)
|
621 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
622 |
+
|
623 |
+
if pixel_values is None:
|
624 |
+
raise ValueError("You have to specify pixel_values")
|
625 |
+
|
626 |
+
# Prepare head mask if needed
|
627 |
+
# 1.0 in head_mask indicate we keep the head
|
628 |
+
# attention_probs has shape bsz x n_heads x N x N
|
629 |
+
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
|
630 |
+
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
|
631 |
+
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
632 |
+
|
633 |
+
embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
|
634 |
+
|
635 |
+
encoder_outputs = self.encoder(
|
636 |
+
embedding_output,
|
637 |
+
head_mask=head_mask,
|
638 |
+
output_attentions=output_attentions,
|
639 |
+
output_hidden_states=output_hidden_states,
|
640 |
+
return_dict=return_dict,
|
641 |
+
)
|
642 |
+
sequence_output = encoder_outputs[0]
|
643 |
+
sequence_output = self.layernorm(sequence_output)
|
644 |
+
pooled_output = sequence_output[:, 0, :]
|
645 |
+
|
646 |
+
if not return_dict:
|
647 |
+
head_outputs = (sequence_output, pooled_output)
|
648 |
+
return head_outputs + encoder_outputs[1:]
|
649 |
+
|
650 |
+
return BaseModelOutputWithPooling(
|
651 |
+
last_hidden_state=sequence_output,
|
652 |
+
pooler_output=pooled_output,
|
653 |
+
hidden_states=encoder_outputs.hidden_states,
|
654 |
+
attentions=encoder_outputs.attentions,
|
655 |
+
)
|
656 |
+
|
657 |
+
|
658 |
+
@add_start_docstrings(
|
659 |
+
"""
|
660 |
+
Dinov2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state
|
661 |
+
of the [CLS] token) e.g. for ImageNet.
|
662 |
+
""",
|
663 |
+
DINOV2_START_DOCSTRING,
|
664 |
+
)
|
665 |
+
class Dinov2ForImageClassification(Dinov2PreTrainedModel):
|
666 |
+
def __init__(self, config: Dinov2Config) -> None:
|
667 |
+
super().__init__(config)
|
668 |
+
|
669 |
+
self.num_labels = config.num_labels
|
670 |
+
self.dinov2 = Dinov2Model(config)
|
671 |
+
|
672 |
+
# Classifier head
|
673 |
+
self.classifier = (
|
674 |
+
nn.Linear(config.hidden_size * 2, config.num_labels) if config.num_labels > 0 else nn.Identity()
|
675 |
+
)
|
676 |
+
|
677 |
+
# Initialize weights and apply final processing
|
678 |
+
self.post_init()
|
679 |
+
|
680 |
+
@add_start_docstrings_to_model_forward(DINOV2_INPUTS_DOCSTRING)
|
681 |
+
@add_code_sample_docstrings(
|
682 |
+
checkpoint=_IMAGE_CLASS_CHECKPOINT,
|
683 |
+
output_type=ImageClassifierOutput,
|
684 |
+
config_class=_CONFIG_FOR_DOC,
|
685 |
+
expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
|
686 |
+
)
|
687 |
+
def forward(
|
688 |
+
self,
|
689 |
+
pixel_values: Optional[torch.Tensor] = None,
|
690 |
+
head_mask: Optional[torch.Tensor] = None,
|
691 |
+
labels: Optional[torch.Tensor] = None,
|
692 |
+
output_attentions: Optional[bool] = None,
|
693 |
+
output_hidden_states: Optional[bool] = None,
|
694 |
+
return_dict: Optional[bool] = None,
|
695 |
+
) -> Union[tuple, ImageClassifierOutput]:
|
696 |
+
r"""
|
697 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
698 |
+
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
|
699 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
700 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
701 |
+
"""
|
702 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
703 |
+
|
704 |
+
outputs = self.dinov2(
|
705 |
+
pixel_values,
|
706 |
+
head_mask=head_mask,
|
707 |
+
output_attentions=output_attentions,
|
708 |
+
output_hidden_states=output_hidden_states,
|
709 |
+
return_dict=return_dict,
|
710 |
+
)
|
711 |
+
|
712 |
+
sequence_output = outputs[0] # batch_size, sequence_length, hidden_size
|
713 |
+
|
714 |
+
cls_token = sequence_output[:, 0]
|
715 |
+
patch_tokens = sequence_output[:, 1:]
|
716 |
+
|
717 |
+
linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1)
|
718 |
+
|
719 |
+
logits = self.classifier(linear_input)
|
720 |
+
|
721 |
+
loss = None
|
722 |
+
if labels is not None:
|
723 |
+
# move labels to correct device to enable model parallelism
|
724 |
+
labels = labels.to(logits.device)
|
725 |
+
if self.config.problem_type is None:
|
726 |
+
if self.num_labels == 1:
|
727 |
+
self.config.problem_type = "regression"
|
728 |
+
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
|
729 |
+
self.config.problem_type = "single_label_classification"
|
730 |
+
else:
|
731 |
+
self.config.problem_type = "multi_label_classification"
|
732 |
+
|
733 |
+
if self.config.problem_type == "regression":
|
734 |
+
loss_fct = MSELoss()
|
735 |
+
if self.num_labels == 1:
|
736 |
+
loss = loss_fct(logits.squeeze(), labels.squeeze())
|
737 |
+
else:
|
738 |
+
loss = loss_fct(logits, labels)
|
739 |
+
elif self.config.problem_type == "single_label_classification":
|
740 |
+
loss_fct = CrossEntropyLoss()
|
741 |
+
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
|
742 |
+
elif self.config.problem_type == "multi_label_classification":
|
743 |
+
loss_fct = BCEWithLogitsLoss()
|
744 |
+
loss = loss_fct(logits, labels)
|
745 |
+
|
746 |
+
if not return_dict:
|
747 |
+
output = (logits,) + outputs[2:]
|
748 |
+
return ((loss,) + output) if loss is not None else output
|
749 |
+
|
750 |
+
return ImageClassifierOutput(
|
751 |
+
loss=loss,
|
752 |
+
logits=logits,
|
753 |
+
hidden_states=outputs.hidden_states,
|
754 |
+
attentions=outputs.attentions,
|
755 |
+
)
|
756 |
+
|
757 |
+
|
758 |
+
@add_start_docstrings(
|
759 |
+
"""
|
760 |
+
Dinov2 backbone, to be used with frameworks like DETR and MaskFormer.
|
761 |
+
""",
|
762 |
+
DINOV2_START_DOCSTRING,
|
763 |
+
)
|
764 |
+
class Dinov2Backbone(Dinov2PreTrainedModel, BackboneMixin):
|
765 |
+
def __init__(self, config):
|
766 |
+
super().__init__(config)
|
767 |
+
super()._init_backbone(config)
|
768 |
+
|
769 |
+
self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
|
770 |
+
self.embeddings = Dinov2Embeddings(config)
|
771 |
+
self.encoder = Dinov2Encoder(config)
|
772 |
+
|
773 |
+
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
774 |
+
|
775 |
+
# Initialize weights and apply final processing
|
776 |
+
self.post_init()
|
777 |
+
|
778 |
+
def get_input_embeddings(self) -> Dinov2PatchEmbeddings:
|
779 |
+
return self.embeddings.patch_embeddings
|
780 |
+
|
781 |
+
@add_start_docstrings_to_model_forward(DINOV2_INPUTS_DOCSTRING)
|
782 |
+
@replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
|
783 |
+
def forward(
|
784 |
+
self,
|
785 |
+
pixel_values: torch.Tensor,
|
786 |
+
output_hidden_states: Optional[bool] = None,
|
787 |
+
output_attentions: Optional[bool] = None,
|
788 |
+
return_dict: Optional[bool] = None,
|
789 |
+
) -> BackboneOutput:
|
790 |
+
"""
|
791 |
+
Returns:
|
792 |
+
|
793 |
+
Examples:
|
794 |
+
|
795 |
+
```python
|
796 |
+
>>> from transformers import AutoImageProcessor, AutoBackbone
|
797 |
+
>>> import torch
|
798 |
+
>>> from PIL import Image
|
799 |
+
>>> import requests
|
800 |
+
|
801 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
802 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
803 |
+
|
804 |
+
>>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base")
|
805 |
+
>>> model = AutoBackbone.from_pretrained(
|
806 |
+
... "facebook/dinov2-base", out_features=["stage2", "stage5", "stage8", "stage11"]
|
807 |
+
... )
|
808 |
+
|
809 |
+
>>> inputs = processor(image, return_tensors="pt")
|
810 |
+
|
811 |
+
>>> outputs = model(**inputs)
|
812 |
+
>>> feature_maps = outputs.feature_maps
|
813 |
+
>>> list(feature_maps[-1].shape)
|
814 |
+
[1, 768, 16, 16]
|
815 |
+
```"""
|
816 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
817 |
+
output_hidden_states = (
|
818 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
819 |
+
)
|
820 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
821 |
+
|
822 |
+
embedding_output = self.embeddings(pixel_values)
|
823 |
+
|
824 |
+
outputs = self.encoder(
|
825 |
+
embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict
|
826 |
+
)
|
827 |
+
|
828 |
+
hidden_states = outputs.hidden_states if return_dict else outputs[1]
|
829 |
+
|
830 |
+
feature_maps = ()
|
831 |
+
for stage, hidden_state in zip(self.stage_names, hidden_states):
|
832 |
+
if stage in self.out_features:
|
833 |
+
if self.config.apply_layernorm:
|
834 |
+
hidden_state = self.layernorm(hidden_state)
|
835 |
+
if self.config.reshape_hidden_states:
|
836 |
+
hidden_state = hidden_state[:, 1:]
|
837 |
+
# this was actually a bug in the original implementation that we copied here,
|
838 |
+
# cause normally the order is height, width
|
839 |
+
batch_size, _, height, width = pixel_values.shape
|
840 |
+
patch_size = self.config.patch_size
|
841 |
+
hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1)
|
842 |
+
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
|
843 |
+
feature_maps += (hidden_state,)
|
844 |
+
|
845 |
+
if not return_dict:
|
846 |
+
if output_hidden_states:
|
847 |
+
output = (feature_maps,) + outputs[1:]
|
848 |
+
else:
|
849 |
+
output = (feature_maps,) + outputs[2:]
|
850 |
+
return output
|
851 |
+
|
852 |
+
return BackboneOutput(
|
853 |
+
feature_maps=feature_maps,
|
854 |
+
hidden_states=outputs.hidden_states if output_hidden_states else None,
|
855 |
+
attentions=outputs.attentions if output_attentions else None,
|
856 |
+
)
|
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__init__.py
ADDED
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
from typing import TYPE_CHECKING
|
16 |
+
|
17 |
+
from ...utils import (
|
18 |
+
OptionalDependencyNotAvailable,
|
19 |
+
_LazyModule,
|
20 |
+
is_torch_available,
|
21 |
+
)
|
22 |
+
|
23 |
+
|
24 |
+
_import_structure = {
|
25 |
+
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
|
26 |
+
}
|
27 |
+
|
28 |
+
try:
|
29 |
+
if not is_torch_available():
|
30 |
+
raise OptionalDependencyNotAvailable()
|
31 |
+
except OptionalDependencyNotAvailable:
|
32 |
+
pass
|
33 |
+
else:
|
34 |
+
_import_structure["modeling_gpt_bigcode"] = [
|
35 |
+
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
|
36 |
+
"GPTBigCodeForSequenceClassification",
|
37 |
+
"GPTBigCodeForTokenClassification",
|
38 |
+
"GPTBigCodeForCausalLM",
|
39 |
+
"GPTBigCodeModel",
|
40 |
+
"GPTBigCodePreTrainedModel",
|
41 |
+
]
|
42 |
+
|
43 |
+
if TYPE_CHECKING:
|
44 |
+
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
|
45 |
+
|
46 |
+
try:
|
47 |
+
if not is_torch_available():
|
48 |
+
raise OptionalDependencyNotAvailable()
|
49 |
+
except OptionalDependencyNotAvailable:
|
50 |
+
pass
|
51 |
+
else:
|
52 |
+
from .modeling_gpt_bigcode import (
|
53 |
+
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
|
54 |
+
GPTBigCodeForCausalLM,
|
55 |
+
GPTBigCodeForSequenceClassification,
|
56 |
+
GPTBigCodeForTokenClassification,
|
57 |
+
GPTBigCodeModel,
|
58 |
+
GPTBigCodePreTrainedModel,
|
59 |
+
)
|
60 |
+
|
61 |
+
|
62 |
+
else:
|
63 |
+
import sys
|
64 |
+
|
65 |
+
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.08 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/configuration_gpt_bigcode.cpython-310.pyc
ADDED
Binary file (5.53 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/modeling_gpt_bigcode.cpython-310.pyc
ADDED
Binary file (38.1 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/__init__.py
ADDED
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
from typing import TYPE_CHECKING
|
16 |
+
|
17 |
+
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
|
18 |
+
|
19 |
+
|
20 |
+
_import_structure = {
|
21 |
+
"configuration_nllb_moe": [
|
22 |
+
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
|
23 |
+
"NllbMoeConfig",
|
24 |
+
]
|
25 |
+
}
|
26 |
+
|
27 |
+
try:
|
28 |
+
if not is_torch_available():
|
29 |
+
raise OptionalDependencyNotAvailable()
|
30 |
+
except OptionalDependencyNotAvailable:
|
31 |
+
pass
|
32 |
+
else:
|
33 |
+
_import_structure["modeling_nllb_moe"] = [
|
34 |
+
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
|
35 |
+
"NllbMoeForConditionalGeneration",
|
36 |
+
"NllbMoeModel",
|
37 |
+
"NllbMoePreTrainedModel",
|
38 |
+
"NllbMoeTop2Router",
|
39 |
+
"NllbMoeSparseMLP",
|
40 |
+
]
|
41 |
+
|
42 |
+
|
43 |
+
if TYPE_CHECKING:
|
44 |
+
from .configuration_nllb_moe import (
|
45 |
+
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
|
46 |
+
NllbMoeConfig,
|
47 |
+
)
|
48 |
+
|
49 |
+
try:
|
50 |
+
if not is_torch_available():
|
51 |
+
raise OptionalDependencyNotAvailable()
|
52 |
+
except OptionalDependencyNotAvailable:
|
53 |
+
pass
|
54 |
+
else:
|
55 |
+
from .modeling_nllb_moe import (
|
56 |
+
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
|
57 |
+
NllbMoeForConditionalGeneration,
|
58 |
+
NllbMoeModel,
|
59 |
+
NllbMoePreTrainedModel,
|
60 |
+
NllbMoeSparseMLP,
|
61 |
+
NllbMoeTop2Router,
|
62 |
+
)
|
63 |
+
|
64 |
+
|
65 |
+
else:
|
66 |
+
import sys
|
67 |
+
|
68 |
+
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/configuration_nllb_moe.py
ADDED
@@ -0,0 +1,218 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2023, HuggingFace Inc.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
""" NLLB-MoE model configuration"""
|
16 |
+
from ...configuration_utils import PretrainedConfig
|
17 |
+
from ...utils import logging
|
18 |
+
|
19 |
+
|
20 |
+
logger = logging.get_logger(__name__)
|
21 |
+
|
22 |
+
|
23 |
+
from ..deprecated._archive_maps import NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402


class NllbMoeConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`NllbMoeModel`]. It is used to instantiate an
    NLLB-MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the NLLB-MoE
    [facebook/nllb-moe-54b](https://huggingface.co/facebook/nllb-moe-54b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50265):
            Vocabulary size of the NllbMoe model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`NllbMoeModel`].
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        classifier_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the classifier.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        second_expert_policy (`str`, *optional*, defaults to `"all"`):
            The policy used to sample the probability of each token being routed to a second expert.
        normalize_router_prob_before_dropping (`bool`, *optional*, defaults to `True`):
            Whether or not to normalize the router probabilities before applying a mask based on the experts' capacity
            (capacity dropping).
        batch_prioritized_routing (`bool`, *optional*, defaults to `True`):
            Whether or not to order the tokens by their router probabilities before capacity dropping. This means that
            the tokens that have the highest probabilities will be routed before other tokens that might be further in
            the sequence.
        moe_eval_capacity_token_fraction (`float`, *optional*, defaults to 1.0):
            Fraction of tokens used as capacity during validation; if set to a negative value, the training value is
            reused. Should be in the range (0.0, 1.0].
        num_experts (`int`, *optional*, defaults to 128):
            Number of experts for each NllbMoeSparseMlp layer.
        expert_capacity (`int`, *optional*, defaults to 64):
            Number of tokens that can be stored in each expert.
        encoder_sparse_step (`int`, *optional*, defaults to 4):
            Frequency of the sparse layers in the encoder. 4 means that one out of 4 layers will be sparse.
        decoder_sparse_step (`int`, *optional*, defaults to 4):
            Frequency of the sparse layers in the decoder. 4 means that one out of 4 layers will be sparse.
        router_dtype (`str`, *optional*, defaults to `"float32"`):
            The `dtype` used for the routers. It is preferable to keep the `dtype` to `"float32"` as specified in the
            *selective precision* discussion in [the paper](https://arxiv.org/abs/2101.03961).
        router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`):
            Whether to ignore padding tokens when routing. If `False`, the padding tokens are not routed to any
            experts.
        router_bias (`bool`, *optional*, defaults to `False`):
            Whether or not the classifier of the router should have a bias.
        moe_token_dropout (`float`, *optional*, defaults to 0.2):
            Masking rate for MoE expert output masking (EOM), which is implemented via a Dropout2d on the expert
            outputs.
        output_router_logits (`bool`, *optional*, defaults to `False`):
            Whether or not to return the router logits. Only set to `True` to get the auxiliary loss when training.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).

    Example:

    ```python
    >>> from transformers import NllbMoeModel, NllbMoeConfig

    >>> # Initializing a NllbMoe facebook/nllb-moe-54b style configuration
    >>> configuration = NllbMoeConfig()

    >>> # Initializing a model from the facebook/nllb-moe-54b style configuration
    >>> model = NllbMoeModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
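The sparsity-related arguments above (`num_experts`, `expert_capacity`, `encoder_sparse_step`, `decoder_sparse_step`) are the ones that decide which layers become MoE layers. For quick experiments it can be handy to instantiate a much smaller random model by overriding them; a minimal sketch, with tiny illustrative values that do not correspond to any released checkpoint:

```python
from transformers import NllbMoeConfig, NllbMoeModel

# Illustrative, tiny values so the randomly initialised model fits in memory.
config = NllbMoeConfig(
    vocab_size=256,
    d_model=64,
    encoder_layers=2,
    decoder_layers=2,
    encoder_ffn_dim=128,
    decoder_ffn_dim=128,
    encoder_attention_heads=4,
    decoder_attention_heads=4,
    num_experts=4,
    expert_capacity=8,
    encoder_sparse_step=2,  # every second encoder layer uses the sparse MoE MLP
    decoder_sparse_step=2,
)
model = NllbMoeModel(config)
print(sum(p.numel() for p in model.parameters()))
```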
llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/convert_nllb_moe_sharded_original_checkpoint_to_pytorch.py
ADDED
@@ -0,0 +1,160 @@
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os

import torch
from torch import nn

from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
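`shard_on_the_fly` writes one `.bin` file per fairseq expert rank plus a shared shard, then records a parameter-name-to-shard-file map in the standard index file. A small sketch of how that index can be inspected afterwards, assuming the conversion above was run (the dump path below is a placeholder for the `--pytorch_dump_folder_path` you chose):

```python
import json
import os

from transformers.utils import WEIGHTS_INDEX_NAME

dump_path = "/path/to/hf-converted-moe"  # hypothetical output folder (--pytorch_dump_folder_path)

with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), encoding="utf-8") as f:
    index = json.load(f)

# `shard_on_the_fly` stores the accumulated byte size and a parameter -> shard file map.
print(index["metadata"]["total_size"])
first_key = next(iter(index["weight_map"]))
print(first_key, "->", index["weight_map"][first_key])
```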
llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/modeling_nllb_moe.py
ADDED
@@ -0,0 +1,1792 @@
# coding=utf-8
# Copyright 2023 NllbMoe Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch NLLB-MoE model."""


import math
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
from ...modeling_outputs import (
    MoEModelOutput,
    MoEModelOutputWithPastAndCrossAttentions,
    Seq2SeqMoEModelOutput,
    Seq2SeqMoEOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_end_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_nllb_moe import NllbMoeConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "NllbMoeConfig"
_CHECKPOINT_FOR_DOC = "hf-internal-testing/dummy-nllb-moe-2-experts"
_REAL_CHECKPOINT_FOR_DOC = "facebook/nllb-moe-54b"


####################################################
# This dict contains ids and associated url
# for the pretrained weights provided with the models
####################################################

from ..deprecated._archive_maps import NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """
    Shift input ids one token to the right.
    """
    shifted_input_ids = input_ids.new_zeros(input_ids.shape)
    shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
    shifted_input_ids[:, 0] = decoder_start_token_id

    if pad_token_id is None:
        raise ValueError("self.model.config.pad_token_id has to be defined.")
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)

    return shifted_input_ids


# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.

    Args:
        x: torch.Tensor x:

    Returns: torch.Tensor
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx


def load_balancing_loss_func(router_probs: torch.Tensor, expert_indices: torch.Tensor) -> float:
    r"""
    Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.

    See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
    function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
    experts is too unbalanced.

    Args:
        router_probs (`torch.Tensor`):
            Probability assigned to each expert per token. Shape: [batch_size, sequence_length, num_experts].
        expert_indices (`torch.Tensor`):
            Indices tensor of shape [batch_size, sequence_length] identifying the selected expert for a given token.

    Returns:
        The auxiliary loss.
    """
    if router_probs is None:
        return 0

    num_experts = router_probs.shape[-1]

    # cast the expert indices to int64, otherwise one-hot encoding will fail
    if expert_indices.dtype != torch.int64:
        expert_indices = expert_indices.to(torch.int64)

    if len(expert_indices.shape) == 2:
        expert_indices = expert_indices.unsqueeze(2)

    expert_mask = torch.nn.functional.one_hot(expert_indices, num_experts)

    # For a given token, determine if it was routed to a given expert.
    expert_mask = torch.max(expert_mask, axis=-2).values

    # cast to float32 otherwise mean will fail
    expert_mask = expert_mask.to(torch.float32)
    tokens_per_group_and_expert = torch.mean(expert_mask, axis=-2)

    router_prob_per_group_and_expert = torch.mean(router_probs, axis=-2)
    return torch.mean(tokens_per_group_and_expert * router_prob_per_group_and_expert) * (num_experts**2)


# Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding
class NllbMoeSinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length."""

    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
        super().__init__()
        self.offset = 2
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)

    def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
        if hasattr(self, "weights"):
            # in forward put the weights on the correct dtype and device of the param
            emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)

        self.register_buffer("weights", emb_weights, persistent=False)

    @staticmethod
    def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        """
        Build sinusoidal embeddings.

        This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
        "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero pad
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            emb[padding_idx, :] = 0

        return emb.to(torch.get_default_dtype())

    @torch.no_grad()
    def forward(
        self, input_ids: torch.Tensor = None, inputs_embeds: torch.Tensor = None, past_key_values_length: int = 0
    ):
        if input_ids is not None:
            bsz, seq_len = input_ids.size()
            # Create the position ids from the input token ids. Any padded tokens remain padded.
            position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
                input_ids.device
            )
        else:
            bsz, seq_len = inputs_embeds.size()[:-1]
            position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length)

        # expand embeddings if needed
        max_pos = self.padding_idx + 1 + seq_len + past_key_values_length
        if max_pos > self.weights.size(0):
            self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)

        return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach()

    def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_length):
        """
        We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.

        Args:
            inputs_embeds: torch.Tensor

        Returns: torch.Tensor
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]

        position_ids = torch.arange(
            self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
        )
        return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length


class NllbMoeTop2Router(nn.Module):
    """
    Router using tokens choose top-2 experts assignment.

    This router uses the same mechanism as in NLLB-MoE from the fairseq repository. Items are sorted by router_probs
    and then routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee
    that each token is processed by an expert**, or that each expert receives at least one token.

    The router combining weights are also returned to make sure that the states that are not updated will be masked.

    """

    def __init__(self, config: NllbMoeConfig):
        super().__init__()
        self.num_experts = config.num_experts
        self.expert_capacity = config.expert_capacity
        self.classifier = nn.Linear(config.hidden_size, self.num_experts, bias=config.router_bias)
        self.router_ignore_padding_tokens = config.router_ignore_padding_tokens
        self.dtype = getattr(torch, config.router_dtype)

        self.second_expert_policy = config.second_expert_policy
        self.normalize_router_prob_before_dropping = config.normalize_router_prob_before_dropping
        self.batch_prioritized_routing = config.batch_prioritized_routing
        self.moe_eval_capacity_token_fraction = config.moe_eval_capacity_token_fraction

    def _cast_classifier(self):
        r"""
        `bitsandbytes` `Linear8bitLt` layers do not support manual casting. Therefore we need to check if they are an
        instance of the `Linear8bitLt` class by checking special attributes.
        """
        if not (hasattr(self.classifier, "SCB") or hasattr(self.classifier, "CB")):
            self.classifier = self.classifier.to(self.dtype)

    def normalize_router_probabilities(self, router_probs, top_1_mask, top_2_mask):
        top_1_max_probs = (router_probs * top_1_mask).sum(dim=1)
        top_2_max_probs = (router_probs * top_2_mask).sum(dim=1)
        denom_s = torch.clamp(top_1_max_probs + top_2_max_probs, min=torch.finfo(router_probs.dtype).eps)
        top_1_max_probs = top_1_max_probs / denom_s
        top_2_max_probs = top_2_max_probs / denom_s
        return top_1_max_probs, top_2_max_probs

    def route_tokens(
        self,
        router_logits: torch.Tensor,
        input_dtype: torch.dtype = torch.float32,
        padding_mask: Optional[torch.LongTensor] = None,
    ) -> Tuple:
        """
        Computes the `dispatch_mask` and the `dispatch_weights` for each expert. The masks are adapted to the expert
        capacity.
        """
        nb_tokens = router_logits.shape[0]
        # Apply Softmax and cast back to the original `dtype`
        router_probs = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(input_dtype)
        top_1_expert_index = torch.argmax(router_probs, dim=-1)
        top_1_mask = torch.nn.functional.one_hot(top_1_expert_index, num_classes=self.num_experts)

        if self.second_expert_policy == "sampling":
            gumbel = torch.distributions.gumbel.Gumbel(0, 1).rsample
            router_logits += gumbel(router_logits.shape).to(router_logits.device)

        # replace top_1_expert_index with min values
        logits_except_top_1 = router_logits.masked_fill(top_1_mask.bool(), float("-inf"))
        top_2_expert_index = torch.argmax(logits_except_top_1, dim=-1)
        top_2_mask = torch.nn.functional.one_hot(top_2_expert_index, num_classes=self.num_experts)

        if self.normalize_router_prob_before_dropping:
            top_1_max_probs, top_2_max_probs = self.normalize_router_probabilities(
                router_probs, top_1_mask, top_2_mask
            )

        if self.second_expert_policy == "random":
            top_2_max_probs = (router_probs * top_2_mask).sum(dim=1)
            sampled = (2 * top_2_max_probs) > torch.rand_like(top_2_max_probs.float())
            top_2_mask = top_2_mask * sampled.repeat(self.num_experts, 1).transpose(1, 0)

        if padding_mask is not None and not self.router_ignore_padding_tokens:
            if len(padding_mask.shape) == 4:
                # only get the last causal mask
                padding_mask = padding_mask[:, :, -1, :].reshape(-1)[-nb_tokens:]
            non_padding = ~padding_mask.bool()
            top_1_mask = top_1_mask * non_padding.unsqueeze(-1).to(top_1_mask.dtype)
            top_2_mask = top_2_mask * non_padding.unsqueeze(-1).to(top_1_mask.dtype)

        if self.batch_prioritized_routing:
            # sort tokens based on their routing probability
            # to make sure important tokens are routed, first
            importance_scores = -1 * router_probs.max(dim=1)[0]
            sorted_top_1_mask = top_1_mask[importance_scores.argsort(dim=0)]
            sorted_cumsum1 = (torch.cumsum(sorted_top_1_mask, dim=0) - 1) * sorted_top_1_mask
            locations1 = sorted_cumsum1[importance_scores.argsort(dim=0).argsort(dim=0)]

            sorted_top_2_mask = top_2_mask[importance_scores.argsort(dim=0)]
            sorted_cumsum2 = (torch.cumsum(sorted_top_2_mask, dim=0) - 1) * sorted_top_2_mask
            locations2 = sorted_cumsum2[importance_scores.argsort(dim=0).argsort(dim=0)]
            # Update 2nd's location by accounting for locations of 1st
            locations2 += torch.sum(top_1_mask, dim=0, keepdim=True)

        else:
            locations1 = torch.cumsum(top_1_mask, dim=0) - 1
            locations2 = torch.cumsum(top_2_mask, dim=0) - 1
            # Update 2nd's location by accounting for locations of 1st
            locations2 += torch.sum(top_1_mask, dim=0, keepdim=True)

        if not self.training and self.moe_eval_capacity_token_fraction > 0:
            self.expert_capacity = math.ceil(self.moe_eval_capacity_token_fraction * nb_tokens)
        else:
            capacity = 2 * math.ceil(nb_tokens / self.num_experts)
            self.expert_capacity = capacity if self.expert_capacity is None else self.expert_capacity

        # Remove locations outside capacity from ( cumsum < capacity = False will not be routed)
        top_1_mask = top_1_mask * torch.lt(locations1, self.expert_capacity)
        top_2_mask = top_2_mask * torch.lt(locations2, self.expert_capacity)

        if not self.normalize_router_prob_before_dropping:
            top_1_max_probs, top_2_max_probs = self.normalize_router_probabilities(
                router_probs, top_1_mask, top_2_mask
            )

        # Calculate combine_weights and dispatch_mask
        gates1 = top_1_max_probs[:, None] * top_1_mask
        gates2 = top_2_max_probs[:, None] * top_2_mask
        router_probs = gates1 + gates2

        return top_1_mask, router_probs

    def forward(self, hidden_states: torch.Tensor, padding_mask: Optional[torch.LongTensor] = None) -> Tuple:
        r"""
        The hidden states are reshaped to simplify the computation of the router probabilities (combining weights for
        each expert.)

        Args:
            hidden_states (`torch.Tensor`):
                (batch_size, sequence_length, hidden_dim) from which router probabilities are computed.
        Returns:
            top_1_mask (`torch.Tensor` of shape (batch_size, sequence_length)):
                Index tensor of shape [batch_size, sequence_length] corresponding to the expert selected for each token
                using the top1 probabilities of the router.
            router_probabilities (`torch.Tensor` of shape (batch_size, sequence_length, num_experts)):
                Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each
                token and expert. Used for routing tokens to experts.
            router_logits (`torch.Tensor` of shape (batch_size, sequence_length)):
                Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits.
                This is used later for computing router z-loss.
        """
        self.input_dtype = hidden_states.dtype
        batch_size, sequence_length, hidden_dim = hidden_states.shape
        hidden_states = hidden_states.reshape((batch_size * sequence_length), hidden_dim)
        hidden_states = hidden_states.to(self.dtype)
        self._cast_classifier()
        router_logits = self.classifier(hidden_states)
        top_1_mask, router_probs = self.route_tokens(router_logits, self.input_dtype, padding_mask)
        return top_1_mask, router_probs


class NllbMoeDenseActDense(nn.Module):
    def __init__(self, config: NllbMoeConfig, ffn_dim: int):
        super().__init__()
        self.fc1 = nn.Linear(config.d_model, ffn_dim)
        self.fc2 = nn.Linear(ffn_dim, config.d_model)
        self.dropout = nn.Dropout(config.activation_dropout)
        self.act = ACT2FN[config.activation_function]

    def forward(self, hidden_states):
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.dropout(hidden_states)
        if (
            isinstance(self.fc2.weight, torch.Tensor)
            and hidden_states.dtype != self.fc2.weight.dtype
            and (self.fc2.weight.dtype != torch.int8 and self.fc2.weight.dtype != torch.uint8)
        ):
            hidden_states = hidden_states.to(self.fc2.weight.dtype)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class NllbMoeSparseMLP(nn.Module):
    r"""
    Implementation of the NLLB-MoE sparse MLP module.
    """

    def __init__(self, config: NllbMoeConfig, ffn_dim: int, expert_class: nn.Module = NllbMoeDenseActDense):
        super().__init__()
        self.router = NllbMoeTop2Router(config)
        self.moe_token_dropout = config.moe_token_dropout
        self.token_dropout = nn.Dropout(self.moe_token_dropout)
        self.num_experts = config.num_experts

        self.experts = nn.ModuleDict()
        for idx in range(self.num_experts):
            self.experts[f"expert_{idx}"] = expert_class(config, ffn_dim)

    def forward(self, hidden_states: torch.Tensor, padding_mask: Optional[torch.Tensor] = False):
        r"""
        The goal of this forward pass is to have the same number of operations as the equivalent
        `NllbMoeDenseActDense` (mlp) layer. This means that all of the hidden states should be processed at most twice
        (since we are using a top_2 gating mechanism). This means that we keep the complexity to
        O(batch_size x sequence_length x hidden_dim) instead of O(num_experts x batch_size x sequence_length x
        hidden_dim).

        1- Get the `router_probs` from the `router`. The shape of the `router_mask` is `(batch_size X sequence_length,
        num_expert)` and corresponds to the boolean version of the `router_probs`. The inputs are masked using the
        `router_mask`.

        2- Dispatch the hidden_states to its associated experts. The router probabilities are used to weight the
        contribution of each expert when updating the masked hidden states.

        Args:
            hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_dim)`):
                The hidden states
            padding_mask (`torch.Tensor`, *optional*, defaults to `False`):
                Attention mask. Can be in the causal form or not.

        Returns:
            hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_dim)`):
                Updated hidden states
            router_logits (`torch.Tensor` of shape `(batch_size, sequence_length, num_experts)`):
                Needed for computing the loss

        """
        batch_size, sequence_length, hidden_dim = hidden_states.shape

        top_1_mask, router_probs = self.router(hidden_states, padding_mask)
        router_mask = router_probs.bool()
        hidden_states = hidden_states.reshape((batch_size * sequence_length), hidden_dim)
        masked_hidden_states = torch.einsum("bm,be->ebm", hidden_states, router_mask)
        for idx, expert in enumerate(self.experts.values()):
            token_indices = router_mask[:, idx]
            combining_weights = router_probs[token_indices, idx]
            expert_output = expert(masked_hidden_states[idx, token_indices])
            if self.moe_token_dropout > 0:
                if self.training:
                    expert_output = self.token_dropout(expert_output)
                else:
                    expert_output *= 1 - self.moe_token_dropout
            masked_hidden_states[idx, token_indices] = torch.einsum("b,be->be", combining_weights, expert_output)
        hidden_states = masked_hidden_states.sum(dim=0).reshape(batch_size, sequence_length, hidden_dim)

        top_1_expert_index = torch.argmax(top_1_mask, dim=-1)
        return hidden_states, (router_probs, top_1_expert_index)


# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->NllbMoe,key_value_states->encoder_hidden_states
class NllbMoeAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_causal: bool = False,
        config: Optional[NllbMoeConfig] = None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.config = config

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.is_causal = is_causal

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        # if encoder_hidden_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = encoder_hidden_states is not None

        bsz, tgt_len, _ = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        # `past_key_value[0].shape[2] == encoder_hidden_states.shape[1]`
        # is checking that the `sequence_length` of the `past_key_value` is the same as
        # the provided `encoder_hidden_states` to support prefix tuning
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == encoder_hidden_states.shape[1]
        ):
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(encoder_hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(encoder_hidden_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.reshape(*proj_shape)
        value_states = value_states.reshape(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)

        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped, past_key_value


class NllbMoeEncoderLayer(nn.Module):
    def __init__(self, config: NllbMoeConfig, is_sparse: bool = False):
        super().__init__()
        self.embed_dim = config.d_model
        self.is_sparse = is_sparse
        self.self_attn = NllbMoeAttention(
            embed_dim=self.embed_dim,
            num_heads=config.encoder_attention_heads,
            dropout=config.attention_dropout,
        )
        self.attn_dropout = nn.Dropout(config.dropout)
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        if not self.is_sparse:
            self.ffn = NllbMoeDenseActDense(config, ffn_dim=config.encoder_ffn_dim)
        else:
            self.ffn = NllbMoeSparseMLP(config, ffn_dim=config.encoder_ffn_dim)
        self.ff_layer_norm = nn.LayerNorm(config.d_model)
        self.ff_dropout = nn.Dropout(config.activation_dropout)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        layer_head_mask: torch.Tensor,
        output_attentions: bool = False,
        output_router_logits: bool = False,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`):
                attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
                large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states, attn_weights, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = self.attn_dropout(hidden_states)
        hidden_states = residual + hidden_states

        residual = hidden_states

        hidden_states = self.ff_layer_norm(hidden_states)
        if self.is_sparse:
            hidden_states, router_states = self.ffn(hidden_states, attention_mask)
        else:
            # router_states set to None to track which layers have None gradients.
            hidden_states, router_states = self.ffn(hidden_states), None

        hidden_states = self.ff_dropout(hidden_states)

        hidden_states = residual + hidden_states

        if hidden_states.dtype == torch.float16 and (
            torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
        ):
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        if output_router_logits:
            outputs += (router_states,)

        return outputs


class NllbMoeDecoderLayer(nn.Module):
    def __init__(self, config: NllbMoeConfig, is_sparse: bool = False):
        super().__init__()
        self.embed_dim = config.d_model
        self.is_sparse = is_sparse
        self.self_attn = NllbMoeAttention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.attn_dropout = nn.Dropout(config.dropout)

        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.cross_attention = NllbMoeAttention(
            self.embed_dim, config.decoder_attention_heads, config.attention_dropout, is_decoder=True
        )
        self.cross_attention_layer_norm = nn.LayerNorm(self.embed_dim)
        if not self.is_sparse:
            self.ffn = NllbMoeDenseActDense(config, ffn_dim=config.decoder_ffn_dim)
        else:
            self.ffn = NllbMoeSparseMLP(config, ffn_dim=config.decoder_ffn_dim)
        self.ff_layer_norm = nn.LayerNorm(config.d_model)
        self.ff_dropout = nn.Dropout(config.activation_dropout)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        output_router_logits: Optional[bool] = False,
        use_cache: Optional[bool] = True,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`):
                attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
                large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`):
                encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by
                very large negative values.
            layer_head_mask (`torch.FloatTensor`):
                mask for attention heads in a given layer of size `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`):
                mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`.
            past_key_value (`Tuple(torch.FloatTensor)`):
                cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
|
767 |
+
hidden_states=hidden_states,
|
768 |
+
past_key_value=self_attn_past_key_value,
|
769 |
+
attention_mask=attention_mask,
|
770 |
+
layer_head_mask=layer_head_mask,
|
771 |
+
output_attentions=output_attentions,
|
772 |
+
)
|
773 |
+
hidden_states = self.attn_dropout(hidden_states)
|
774 |
+
hidden_states = residual + hidden_states
|
775 |
+
|
776 |
+
# Cross-Attention Block
|
777 |
+
cross_attn_present_key_value = None
|
778 |
+
cross_attn_weights = None
|
779 |
+
if encoder_hidden_states is not None:
|
780 |
+
residual = hidden_states
|
781 |
+
hidden_states = self.cross_attention_layer_norm(hidden_states)
|
782 |
+
|
783 |
+
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
|
784 |
+
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
|
785 |
+
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.cross_attention(
|
786 |
+
hidden_states=hidden_states,
|
787 |
+
encoder_hidden_states=encoder_hidden_states,
|
788 |
+
past_key_value=cross_attn_past_key_value,
|
789 |
+
attention_mask=encoder_attention_mask,
|
790 |
+
layer_head_mask=cross_attn_layer_head_mask,
|
791 |
+
output_attentions=output_attentions,
|
792 |
+
)
|
793 |
+
hidden_states = self.attn_dropout(hidden_states)
|
794 |
+
hidden_states = residual + hidden_states
|
795 |
+
|
796 |
+
# add cross-attn to positions 3,4 of present_key_value tuple
|
797 |
+
present_key_value += cross_attn_present_key_value
|
798 |
+
|
799 |
+
# Fully Connected
|
800 |
+
residual = hidden_states
|
801 |
+
|
802 |
+
hidden_states = self.ff_layer_norm(hidden_states)
|
803 |
+
if self.is_sparse:
|
804 |
+
hidden_states, router_states = self.ffn(hidden_states, attention_mask)
|
805 |
+
else:
|
806 |
+
hidden_states, router_states = self.ffn(hidden_states), None
|
807 |
+
|
808 |
+
hidden_states = self.ff_dropout(hidden_states)
|
809 |
+
|
810 |
+
hidden_states = residual + hidden_states
|
811 |
+
|
812 |
+
# clamp inf values to enable fp16 training
|
813 |
+
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
|
814 |
+
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
|
815 |
+
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
|
816 |
+
|
817 |
+
outputs = (hidden_states, present_key_value)
|
818 |
+
|
819 |
+
if output_attentions:
|
820 |
+
outputs += (self_attn_weights, cross_attn_weights)
|
821 |
+
|
822 |
+
if output_router_logits:
|
823 |
+
outputs += (router_states,)
|
824 |
+
|
825 |
+
return outputs
|
826 |
+
|
827 |
+
|
828 |
+
class NllbMoePreTrainedModel(PreTrainedModel):
|
829 |
+
config_class = NllbMoeConfig
|
830 |
+
base_model_prefix = "model"
|
831 |
+
supports_gradient_checkpointing = True
|
832 |
+
_no_split_modules = ["NllbMoeEncoderLayer", "NllbMoeDecoderLayer"]
|
833 |
+
|
834 |
+
def _init_weights(self, module):
|
835 |
+
"""Initialize the weights"""
|
836 |
+
std = self.config.init_std
|
837 |
+
if isinstance(module, nn.Linear):
|
838 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
839 |
+
if module.bias is not None:
|
840 |
+
module.bias.data.zero_()
|
841 |
+
elif isinstance(module, nn.Embedding):
|
842 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
843 |
+
if module.padding_idx is not None:
|
844 |
+
module.weight.data[module.padding_idx].zero_()
|
845 |
+
|
846 |
+
|
847 |
+
NLLB_MOE_START_DOCSTRING = r"""
|
848 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
849 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
850 |
+
etc.)
|
851 |
+
|
852 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
853 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
854 |
+
and behavior.
|
855 |
+
|
856 |
+
Parameters:
|
857 |
+
config ([`NllbMoeConfig`]):
|
858 |
+
Model configuration class with all the parameters of the model. Initializing with a config file does not
|
859 |
+
load the weights associated with the model, only the configuration. Check out the
|
860 |
+
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
861 |
+
"""
|
862 |
+
|
863 |
+
NLLB_MOE_GENERATION_EXAMPLE = r"""
|
864 |
+
Translation example:
|
865 |
+
|
866 |
+
```python
|
867 |
+
>>> from transformers import AutoTokenizer, NllbMoeForConditionalGeneration
|
868 |
+
|
869 |
+
>>> model = NllbMoeForConditionalGeneration.from_pretrained("facebook/nllb-moe-54b")
|
870 |
+
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-moe-54b")
|
871 |
+
|
872 |
+
>>> text_to_translate = "Life is like a box of chocolates"
|
873 |
+
>>> model_inputs = tokenizer(text_to_translate, return_tensors="pt")
|
874 |
+
|
875 |
+
>>> # translate to French
|
876 |
+
>>> gen_tokens = model.generate(**model_inputs, forced_bos_token_id=tokenizer.get_lang_id("fra_Latn"))
|
877 |
+
>>> print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True))
|
878 |
+
```
|
879 |
+
"""
|
880 |
+
|
881 |
+
NLLB_MOE_INPUTS_DOCSTRING = r"""
|
882 |
+
Args:
|
883 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
884 |
+
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
|
885 |
+
it.
|
886 |
+
|
887 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
888 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
889 |
+
|
890 |
+
[What are input IDs?](../glossary#input-ids)
|
891 |
+
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
892 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
893 |
+
|
894 |
+
- 1 for tokens that are **not masked**,
|
895 |
+
- 0 for tokens that are **masked**.
|
896 |
+
|
897 |
+
[What are attention masks?](../glossary#attention-mask)
|
898 |
+
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
|
899 |
+
Indices of decoder input sequence tokens in the vocabulary.
|
900 |
+
|
901 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
902 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
903 |
+
|
904 |
+
[What are decoder input IDs?](../glossary#decoder-input-ids)
|
905 |
+
|
906 |
+
NllbMoe uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
|
907 |
+
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
|
908 |
+
`past_key_values`).
|
909 |
+
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
|
910 |
+
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
|
911 |
+
be used by default.
|
912 |
+
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
|
913 |
+
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
|
914 |
+
|
915 |
+
- 1 indicates the head is **not masked**,
|
916 |
+
- 0 indicates the head is **masked**.
|
917 |
+
|
918 |
+
decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
|
919 |
+
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
|
920 |
+
|
921 |
+
- 1 indicates the head is **not masked**,
|
922 |
+
- 0 indicates the head is **masked**.
|
923 |
+
|
924 |
+
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
|
925 |
+
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
|
926 |
+
1]`:
|
927 |
+
|
928 |
+
- 1 indicates the head is **not masked**,
|
929 |
+
- 0 indicates the head is **masked**.
|
930 |
+
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
|
931 |
+
Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
|
932 |
+
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
|
933 |
+
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
|
934 |
+
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
|
935 |
+
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
|
936 |
+
`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
|
937 |
+
`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
|
938 |
+
|
939 |
+
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
|
940 |
+
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
|
941 |
+
|
942 |
+
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
|
943 |
+
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
|
944 |
+
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
|
945 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
946 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
|
947 |
+
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
|
948 |
+
than the model's internal embedding lookup matrix.
|
949 |
+
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
|
950 |
+
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
|
951 |
+
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
|
952 |
+
input (see `past_key_values`). This is useful if you want more control over how to convert
|
953 |
+
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
|
954 |
+
|
955 |
+
If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
|
956 |
+
of `inputs_embeds`.
|
957 |
+
use_cache (`bool`, *optional*):
|
958 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
959 |
+
`past_key_values`).
|
960 |
+
output_attentions (`bool`, *optional*):
|
961 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
962 |
+
tensors for more detail.
|
963 |
+
output_hidden_states (`bool`, *optional*):
|
964 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
965 |
+
more detail.
|
966 |
+
output_router_logits (`bool`, *optional*):
|
967 |
+
Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
|
968 |
+
should not be returned during inference.
|
969 |
+
return_dict (`bool`, *optional*):
|
970 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
971 |
+
"""
|
972 |
+
|
973 |
+
|
974 |
+
class NllbMoeEncoder(NllbMoePreTrainedModel):
|
975 |
+
"""
|
976 |
+
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
|
977 |
+
[`NllbMoeEncoderLayer`].
|
978 |
+
|
979 |
+
Args:
|
980 |
+
config:
|
981 |
+
NllbMoeConfig
|
982 |
+
embed_tokens (nn.Embedding):
|
983 |
+
output embedding
|
984 |
+
"""
|
985 |
+
|
986 |
+
def __init__(self, config: NllbMoeConfig, embed_tokens: Optional[nn.Embedding] = None):
|
987 |
+
super().__init__(config)
|
988 |
+
|
989 |
+
self.dropout = config.dropout
|
990 |
+
self.layerdrop = config.encoder_layerdrop
|
991 |
+
|
992 |
+
embed_dim = config.d_model
|
993 |
+
self.padding_idx = config.pad_token_id
|
994 |
+
self.max_source_positions = config.max_position_embeddings
|
995 |
+
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
|
996 |
+
|
997 |
+
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
|
998 |
+
|
999 |
+
if embed_tokens is not None:
|
1000 |
+
self.embed_tokens.weight = embed_tokens.weight
|
1001 |
+
|
1002 |
+
self.embed_positions = NllbMoeSinusoidalPositionalEmbedding(
|
1003 |
+
config.max_position_embeddings,
|
1004 |
+
embed_dim,
|
1005 |
+
self.padding_idx,
|
1006 |
+
)
|
1007 |
+
sparse_step = config.encoder_sparse_step
|
1008 |
+
self.layers = nn.ModuleList()
|
1009 |
+
for i in range(config.encoder_layers):
|
1010 |
+
is_sparse = (i + 1) % sparse_step == 0 if sparse_step > 0 else False
|
1011 |
+
self.layers.append(NllbMoeEncoderLayer(config, is_sparse))
|
1012 |
+
|
1013 |
+
self.layer_norm = nn.LayerNorm(config.d_model)
|
1014 |
+
|
1015 |
+
self.gradient_checkpointing = False
|
1016 |
+
# Initialize weights and apply final processing
|
1017 |
+
self.post_init()
|
1018 |
+
|
1019 |
+
def forward(
|
1020 |
+
self,
|
1021 |
+
input_ids: Optional[torch.Tensor] = None,
|
1022 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1023 |
+
head_mask: Optional[torch.Tensor] = None,
|
1024 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
1025 |
+
output_attentions: Optional[bool] = None,
|
1026 |
+
output_hidden_states: Optional[bool] = None,
|
1027 |
+
output_router_logits: Optional[bool] = None,
|
1028 |
+
return_dict: Optional[bool] = None,
|
1029 |
+
):
|
1030 |
+
r"""
|
1031 |
+
Args:
|
1032 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
1033 |
+
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
|
1034 |
+
provide it.
|
1035 |
+
|
1036 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
1037 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
1038 |
+
|
1039 |
+
[What are input IDs?](../glossary#input-ids)
|
1040 |
+
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
1041 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
1042 |
+
|
1043 |
+
- 1 for tokens that are **not masked**,
|
1044 |
+
- 0 for tokens that are **masked**.
|
1045 |
+
|
1046 |
+
[What are attention masks?](../glossary#attention-mask)
|
1047 |
+
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
|
1048 |
+
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
|
1049 |
+
|
1050 |
+
- 1 indicates the head is **not masked**,
|
1051 |
+
- 0 indicates the head is **masked**.
|
1052 |
+
|
1053 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
1054 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
|
1055 |
+
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
|
1056 |
+
than the model's internal embedding lookup matrix.
|
1057 |
+
output_attentions (`bool`, *optional*):
|
1058 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
1059 |
+
returned tensors for more detail.
|
1060 |
+
output_hidden_states (`bool`, *optional*):
|
1061 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
|
1062 |
+
for more detail.
|
1063 |
+
output_router_logits (`bool`, *optional*):
|
1064 |
+
Whether or not to return the logits of all the routers. They are useful for computing the router loss,
|
1065 |
+
and should not be returned during inference.
|
1066 |
+
return_dict (`bool`, *optional*):
|
1067 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
1068 |
+
"""
|
1069 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
1070 |
+
output_hidden_states = (
|
1071 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1072 |
+
)
|
1073 |
+
return_dict = return_dict if return_dict is not None else self.config.return_dict
|
1074 |
+
|
1075 |
+
# retrieve input_ids and inputs_embeds
|
1076 |
+
if input_ids is not None and inputs_embeds is not None:
|
1077 |
+
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
1078 |
+
elif input_ids is not None:
|
1079 |
+
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
|
1080 |
+
input_shape = input_ids.size()
|
1081 |
+
input_ids = input_ids.view(-1, input_shape[-1])
|
1082 |
+
elif inputs_embeds is not None:
|
1083 |
+
input_shape = inputs_embeds.size()[:-1]
|
1084 |
+
else:
|
1085 |
+
raise ValueError("You have to specify either input_ids or inputs_embeds")
|
1086 |
+
|
1087 |
+
if inputs_embeds is None:
|
1088 |
+
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
|
1089 |
+
|
1090 |
+
embed_pos = self.embed_positions(input_ids, inputs_embeds)
|
1091 |
+
embed_pos = embed_pos.to(inputs_embeds.device)
|
1092 |
+
|
1093 |
+
hidden_states = inputs_embeds + embed_pos
|
1094 |
+
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
|
1095 |
+
|
1096 |
+
# expand attention_mask
|
1097 |
+
if attention_mask is not None:
|
1098 |
+
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
|
1099 |
+
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
|
1100 |
+
|
1101 |
+
encoder_states = () if output_hidden_states else None
|
1102 |
+
all_router_probs = () if output_router_logits else None
|
1103 |
+
all_attentions = () if output_attentions else None
|
1104 |
+
|
1105 |
+
# check if head_mask has a correct number of layers specified if desired
|
1106 |
+
if head_mask is not None:
|
1107 |
+
if head_mask.size()[0] != len(self.layers):
|
1108 |
+
raise ValueError(
|
1109 |
+
f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
|
1110 |
+
f" {head_mask.size()[0]}."
|
1111 |
+
)
|
1112 |
+
|
1113 |
+
for idx, encoder_layer in enumerate(self.layers):
|
1114 |
+
if output_hidden_states:
|
1115 |
+
encoder_states = encoder_states + (hidden_states,)
|
1116 |
+
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
|
1117 |
+
dropout_probability = torch.rand([])
|
1118 |
+
if self.training and (dropout_probability < self.layerdrop): # skip the layer
|
1119 |
+
layer_outputs = (None, None, None)
|
1120 |
+
else:
|
1121 |
+
if self.gradient_checkpointing and self.training:
|
1122 |
+
layer_outputs = self._gradient_checkpointing_func(
|
1123 |
+
encoder_layer.__call__,
|
1124 |
+
hidden_states,
|
1125 |
+
attention_mask,
|
1126 |
+
(head_mask[idx] if head_mask is not None else None),
|
1127 |
+
output_attentions,
|
1128 |
+
)
|
1129 |
+
else:
|
1130 |
+
layer_outputs = encoder_layer(
|
1131 |
+
hidden_states,
|
1132 |
+
attention_mask,
|
1133 |
+
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
|
1134 |
+
output_attentions=output_attentions,
|
1135 |
+
output_router_logits=output_router_logits,
|
1136 |
+
)
|
1137 |
+
|
1138 |
+
hidden_states = layer_outputs[0]
|
1139 |
+
|
1140 |
+
if output_attentions:
|
1141 |
+
all_attentions += (layer_outputs[1],)
|
1142 |
+
|
1143 |
+
if output_router_logits:
|
1144 |
+
all_router_probs += (layer_outputs[-1],)
|
1145 |
+
|
1146 |
+
last_hidden_state = self.layer_norm(hidden_states)
|
1147 |
+
|
1148 |
+
if output_hidden_states:
|
1149 |
+
encoder_states += (last_hidden_state,)
|
1150 |
+
|
1151 |
+
if not return_dict:
|
1152 |
+
return tuple(
|
1153 |
+
v for v in [last_hidden_state, encoder_states, all_attentions, all_router_probs] if v is not None
|
1154 |
+
)
|
1155 |
+
|
1156 |
+
return MoEModelOutput(
|
1157 |
+
last_hidden_state=last_hidden_state,
|
1158 |
+
hidden_states=encoder_states,
|
1159 |
+
attentions=all_attentions,
|
1160 |
+
router_probs=all_router_probs,
|
1161 |
+
)
|
1162 |
+
|
1163 |
+
|
1164 |
+
class NllbMoeDecoder(NllbMoePreTrainedModel):
|
1165 |
+
"""
|
1166 |
+
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`NllbMoeDecoderLayer`]
|
1167 |
+
|
1168 |
+
Args:
|
1169 |
+
config:
|
1170 |
+
NllbMoeConfig
|
1171 |
+
embed_tokens (nn.Embedding):
|
1172 |
+
output embedding
|
1173 |
+
"""
|
1174 |
+
|
1175 |
+
def __init__(self, config: NllbMoeConfig, embed_tokens: Optional[nn.Embedding] = None):
|
1176 |
+
super().__init__(config)
|
1177 |
+
self.dropout = config.dropout
|
1178 |
+
self.layerdrop = config.decoder_layerdrop
|
1179 |
+
self.padding_idx = config.pad_token_id
|
1180 |
+
self.max_target_positions = config.max_position_embeddings
|
1181 |
+
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
|
1182 |
+
|
1183 |
+
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
|
1184 |
+
|
1185 |
+
if embed_tokens is not None:
|
1186 |
+
self.embed_tokens.weight = embed_tokens.weight
|
1187 |
+
|
1188 |
+
self.embed_positions = NllbMoeSinusoidalPositionalEmbedding(
|
1189 |
+
config.max_position_embeddings,
|
1190 |
+
config.d_model,
|
1191 |
+
self.padding_idx,
|
1192 |
+
)
|
1193 |
+
|
1194 |
+
sparse_step = config.decoder_sparse_step
|
1195 |
+
self.layers = nn.ModuleList()
|
1196 |
+
for i in range(config.decoder_layers):
|
1197 |
+
is_sparse = (i + 1) % sparse_step == 0 if sparse_step > 0 else False
|
1198 |
+
self.layers.append(NllbMoeDecoderLayer(config, is_sparse))
|
1199 |
+
|
1200 |
+
self.layer_norm = nn.LayerNorm(config.d_model)
|
1201 |
+
|
1202 |
+
self.gradient_checkpointing = False
|
1203 |
+
# Initialize weights and apply final processing
|
1204 |
+
self.post_init()
|
1205 |
+
|
1206 |
+
def forward(
|
1207 |
+
self,
|
1208 |
+
input_ids: Optional[torch.Tensor] = None,
|
1209 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1210 |
+
encoder_hidden_states: Optional[torch.Tensor] = None,
|
1211 |
+
encoder_attention_mask: Optional[torch.Tensor] = None,
|
1212 |
+
head_mask: Optional[torch.Tensor] = None,
|
1213 |
+
cross_attn_head_mask: Optional[torch.Tensor] = None,
|
1214 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
1215 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
1216 |
+
use_cache: Optional[bool] = None,
|
1217 |
+
output_attentions: Optional[bool] = None,
|
1218 |
+
output_hidden_states: Optional[bool] = None,
|
1219 |
+
output_router_logits: Optional[bool] = None,
|
1220 |
+
return_dict: Optional[bool] = None,
|
1221 |
+
):
|
1222 |
+
r"""
|
1223 |
+
Args:
|
1224 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
1225 |
+
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
|
1226 |
+
provide it.
|
1227 |
+
|
1228 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
1229 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
1230 |
+
|
1231 |
+
[What are input IDs?](../glossary#input-ids)
|
1232 |
+
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
1233 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
1234 |
+
|
1235 |
+
- 1 for tokens that are **not masked**,
|
1236 |
+
- 0 for tokens that are **masked**.
|
1237 |
+
|
1238 |
+
[What are attention masks?](../glossary#attention-mask)
|
1239 |
+
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
|
1240 |
+
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
|
1241 |
+
of the decoder.
|
1242 |
+
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
|
1243 |
+
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
|
1244 |
+
selected in `[0, 1]`:
|
1245 |
+
|
1246 |
+
- 1 for tokens that are **not masked**,
|
1247 |
+
- 0 for tokens that are **masked**.
|
1248 |
+
|
1249 |
+
[What are attention masks?](../glossary#attention-mask)
|
1250 |
+
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
|
1251 |
+
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
|
1252 |
+
|
1253 |
+
- 1 indicates the head is **not masked**,
|
1254 |
+
- 0 indicates the head is **masked**.
|
1255 |
+
|
1256 |
+
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
|
1257 |
+
Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
|
1258 |
+
cross-attention on hidden heads. Mask values selected in `[0, 1]`:
|
1259 |
+
|
1260 |
+
- 1 indicates the head is **not masked**,
|
1261 |
+
- 0 indicates the head is **masked**.
|
1262 |
+
|
1263 |
+
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
|
1264 |
+
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
|
1265 |
+
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
|
1266 |
+
shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
|
1267 |
+
|
1268 |
+
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
|
1269 |
+
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
|
1270 |
+
|
1271 |
+
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
|
1272 |
+
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
|
1273 |
+
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
|
1274 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
1275 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
|
1276 |
+
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
|
1277 |
+
than the model's internal embedding lookup matrix.
|
1278 |
+
output_attentions (`bool`, *optional*):
|
1279 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
1280 |
+
returned tensors for more detail.
|
1281 |
+
output_hidden_states (`bool`, *optional*):
|
1282 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
|
1283 |
+
for more detail.
|
1284 |
+
output_router_logits (`bool`, *optional*):
|
1285 |
+
Whether or not to return the logits of all the routers. They are useful for computing the router loss,
|
1286 |
+
and should not be returned during inference.
|
1287 |
+
return_dict (`bool`, *optional*):
|
1288 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
1289 |
+
"""
|
1290 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
1291 |
+
output_hidden_states = (
|
1292 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1293 |
+
)
|
1294 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
1295 |
+
return_dict = return_dict if return_dict is not None else self.config.return_dict
|
1296 |
+
|
1297 |
+
# retrieve input_ids and inputs_embeds
|
1298 |
+
if input_ids is not None and inputs_embeds is not None:
|
1299 |
+
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
|
1300 |
+
elif input_ids is not None:
|
1301 |
+
input_shape = input_ids.size()
|
1302 |
+
input_ids = input_ids.view(-1, input_shape[-1])
|
1303 |
+
elif inputs_embeds is not None:
|
1304 |
+
input_shape = inputs_embeds.size()[:-1]
|
1305 |
+
else:
|
1306 |
+
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
|
1307 |
+
|
1308 |
+
# past_key_values_length
|
1309 |
+
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
|
1310 |
+
|
1311 |
+
if inputs_embeds is None:
|
1312 |
+
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
|
1313 |
+
|
1314 |
+
# create causal mask
|
1315 |
+
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
|
1316 |
+
combined_attention_mask = _prepare_4d_causal_attention_mask(
|
1317 |
+
attention_mask, input_shape, inputs_embeds, past_key_values_length
|
1318 |
+
)
|
1319 |
+
|
1320 |
+
# expand encoder attention mask
|
1321 |
+
if encoder_hidden_states is not None and encoder_attention_mask is not None:
|
1322 |
+
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
|
1323 |
+
encoder_attention_mask = _prepare_4d_attention_mask(
|
1324 |
+
encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
|
1325 |
+
)
|
1326 |
+
|
1327 |
+
# embed positions
|
1328 |
+
positions = self.embed_positions(input_ids, inputs_embeds, past_key_values_length)
|
1329 |
+
positions = positions.to(inputs_embeds.device)
|
1330 |
+
|
1331 |
+
hidden_states = inputs_embeds + positions
|
1332 |
+
|
1333 |
+
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
|
1334 |
+
|
1335 |
+
if self.gradient_checkpointing and self.training:
|
1336 |
+
if use_cache:
|
1337 |
+
logger.warning_once(
|
1338 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting" " `use_cache=False`..."
|
1339 |
+
)
|
1340 |
+
use_cache = False
|
1341 |
+
|
1342 |
+
# decoder layers
|
1343 |
+
all_hidden_states = () if output_hidden_states else None
|
1344 |
+
all_self_attns = () if output_attentions else None
|
1345 |
+
all_router_probs = () if output_router_logits else None
|
1346 |
+
all_cross_attentions = () if output_attentions else None
|
1347 |
+
present_key_value_states = () if use_cache else None
|
1348 |
+
|
1349 |
+
# check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
|
1350 |
+
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
|
1351 |
+
if attn_mask is not None:
|
1352 |
+
if attn_mask.size()[0] != len(self.layers):
|
1353 |
+
raise ValueError(
|
1354 |
+
f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
|
1355 |
+
f" {head_mask.size()[0]}."
|
1356 |
+
)
|
1357 |
+
deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
|
1358 |
+
|
1359 |
+
for idx, decoder_layer in enumerate(self.layers):
|
1360 |
+
if output_hidden_states:
|
1361 |
+
all_hidden_states += (hidden_states,)
|
1362 |
+
|
1363 |
+
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
|
1364 |
+
dropout_probability = torch.rand([])
|
1365 |
+
|
1366 |
+
skip_the_layer = True if self.training and (dropout_probability < self.layerdrop) else False
|
1367 |
+
if not skip_the_layer or deepspeed_zero3_is_enabled:
|
1368 |
+
layer_head_mask = head_mask[idx] if head_mask is not None else None
|
1369 |
+
cross_attn_layer_head_mask = cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
|
1370 |
+
|
1371 |
+
past_key_value = past_key_values[idx] if past_key_values is not None else None
|
1372 |
+
|
1373 |
+
# under deepspeed zero3 all gpus must run in sync
|
1374 |
+
if self.gradient_checkpointing and self.training:
|
1375 |
+
if use_cache:
|
1376 |
+
logger.warning_once(
|
1377 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
1378 |
+
)
|
1379 |
+
use_cache = False
|
1380 |
+
layer_outputs = self._gradient_checkpointing_func(
|
1381 |
+
decoder_layer.forward,
|
1382 |
+
hidden_states,
|
1383 |
+
combined_attention_mask,
|
1384 |
+
encoder_hidden_states,
|
1385 |
+
encoder_attention_mask,
|
1386 |
+
layer_head_mask,
|
1387 |
+
cross_attn_layer_head_mask,
|
1388 |
+
None, # past_key_value is always None with gradient checkpointing
|
1389 |
+
use_cache,
|
1390 |
+
output_attentions,
|
1391 |
+
)
|
1392 |
+
else:
|
1393 |
+
layer_outputs = decoder_layer(
|
1394 |
+
hidden_states,
|
1395 |
+
attention_mask=combined_attention_mask,
|
1396 |
+
encoder_hidden_states=encoder_hidden_states,
|
1397 |
+
encoder_attention_mask=encoder_attention_mask,
|
1398 |
+
layer_head_mask=layer_head_mask,
|
1399 |
+
cross_attn_layer_head_mask=cross_attn_layer_head_mask,
|
1400 |
+
past_key_value=past_key_value,
|
1401 |
+
use_cache=use_cache,
|
1402 |
+
output_attentions=output_attentions,
|
1403 |
+
output_router_logits=output_router_logits,
|
1404 |
+
)
|
1405 |
+
|
1406 |
+
hidden_states = layer_outputs[0]
|
1407 |
+
|
1408 |
+
if skip_the_layer:
|
1409 |
+
continue
|
1410 |
+
|
1411 |
+
if use_cache:
|
1412 |
+
present_key_value_states += (layer_outputs[1],)
|
1413 |
+
|
1414 |
+
if output_attentions:
|
1415 |
+
all_self_attns += (layer_outputs[2],)
|
1416 |
+
all_cross_attentions += (layer_outputs[3],)
|
1417 |
+
|
1418 |
+
if output_router_logits:
|
1419 |
+
all_router_probs += (layer_outputs[-1],)
|
1420 |
+
|
1421 |
+
hidden_states = self.layer_norm(hidden_states)
|
1422 |
+
|
1423 |
+
# Add last layer
|
1424 |
+
if output_hidden_states:
|
1425 |
+
all_hidden_states += (hidden_states,)
|
1426 |
+
|
1427 |
+
if not return_dict:
|
1428 |
+
return tuple(
|
1429 |
+
v
|
1430 |
+
for v in [
|
1431 |
+
hidden_states,
|
1432 |
+
present_key_value_states,
|
1433 |
+
all_hidden_states,
|
1434 |
+
all_self_attns,
|
1435 |
+
all_cross_attentions,
|
1436 |
+
all_router_probs,
|
1437 |
+
]
|
1438 |
+
if v is not None
|
1439 |
+
)
|
1440 |
+
return MoEModelOutputWithPastAndCrossAttentions(
|
1441 |
+
last_hidden_state=hidden_states,
|
1442 |
+
past_key_values=present_key_value_states,
|
1443 |
+
hidden_states=all_hidden_states,
|
1444 |
+
attentions=all_self_attns,
|
1445 |
+
cross_attentions=all_cross_attentions,
|
1446 |
+
router_probs=all_router_probs,
|
1447 |
+
)
|
1448 |
+
|
1449 |
+
|
1450 |
+
@add_start_docstrings(
|
1451 |
+
"The bare NllbMoe Model outputting raw hidden-states without any specific head on top.",
|
1452 |
+
NLLB_MOE_START_DOCSTRING,
|
1453 |
+
)
|
1454 |
+
class NllbMoeModel(NllbMoePreTrainedModel):
|
1455 |
+
_tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
|
1456 |
+
|
1457 |
+
def __init__(self, config: NllbMoeConfig):
|
1458 |
+
super().__init__(config)
|
1459 |
+
|
1460 |
+
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
|
1461 |
+
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
|
1462 |
+
|
1463 |
+
self.encoder = NllbMoeEncoder(config, self.shared)
|
1464 |
+
self.decoder = NllbMoeDecoder(config, self.shared)
|
1465 |
+
|
1466 |
+
# Initialize weights and apply final processing
|
1467 |
+
self.post_init()
|
1468 |
+
|
1469 |
+
def get_input_embeddings(self):
|
1470 |
+
return self.shared
|
1471 |
+
|
1472 |
+
def set_input_embeddings(self, value):
|
1473 |
+
self.shared = value
|
1474 |
+
self.encoder.embed_tokens = self.shared
|
1475 |
+
self.decoder.embed_tokens = self.shared
|
1476 |
+
|
1477 |
+
def _tie_weights(self):
|
1478 |
+
if self.config.tie_word_embeddings:
|
1479 |
+
self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
|
1480 |
+
self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
|
1481 |
+
|
1482 |
+
def get_encoder(self):
|
1483 |
+
return self.encoder
|
1484 |
+
|
1485 |
+
def get_decoder(self):
|
1486 |
+
return self.decoder
|
1487 |
+
|
1488 |
+
@add_start_docstrings_to_model_forward(NLLB_MOE_INPUTS_DOCSTRING)
|
1490 |
+
@replace_return_docstrings(output_type=Seq2SeqMoEModelOutput, config_class=_CONFIG_FOR_DOC)
|
1491 |
+
def forward(
|
1492 |
+
self,
|
1493 |
+
input_ids: Optional[torch.LongTensor] = None,
|
1494 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1495 |
+
decoder_input_ids: Optional[torch.LongTensor] = None,
|
1496 |
+
decoder_attention_mask: Optional[torch.LongTensor] = None,
|
1497 |
+
head_mask: Optional[torch.Tensor] = None,
|
1498 |
+
decoder_head_mask: Optional[torch.Tensor] = None,
|
1499 |
+
cross_attn_head_mask: Optional[torch.Tensor] = None,
|
1500 |
+
encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
1501 |
+
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
1502 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1503 |
+
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
|
1504 |
+
use_cache: Optional[bool] = None,
|
1505 |
+
output_attentions: Optional[bool] = None,
|
1506 |
+
output_hidden_states: Optional[bool] = None,
|
1507 |
+
output_router_logits: Optional[bool] = None,
|
1508 |
+
return_dict: Optional[bool] = None,
|
1509 |
+
) -> Union[Tuple[torch.Tensor], Seq2SeqMoEModelOutput]:
|
1510 |
+
r"""
|
1511 |
+
Returns:
|
1512 |
+
|
1513 |
+
Example:
|
1514 |
+
|
1515 |
+
```python
|
1516 |
+
>>> from transformers import AutoTokenizer, NllbMoeModel
|
1517 |
+
|
1518 |
+
>>> tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/random-nllb-moe-2-experts")
|
1519 |
+
>>> model = NllbMoeModel.from_pretrained("hf-internal-testing/random-nllb-moe-2-experts")
|
1520 |
+
|
1521 |
+
>>> input_ids = tokenizer(
|
1522 |
+
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
|
1523 |
+
... ).input_ids # Batch size 1
|
1524 |
+
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
|
1525 |
+
|
1526 |
+
>>> # preprocess: Prepend decoder_input_ids with start token which is pad token for NllbMoeModel
|
1527 |
+
>>> decoder_input_ids = model._shift_right(decoder_input_ids)
|
1528 |
+
|
1529 |
+
>>> # forward pass
|
1530 |
+
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
|
1531 |
+
>>> last_hidden_states = outputs.last_hidden_state
|
1532 |
+
```"""
|
1533 |
+
return_dict = return_dict if return_dict is not None else self.config.return_dict
|
1534 |
+
if encoder_outputs is None:
|
1535 |
+
encoder_outputs = self.encoder(
|
1536 |
+
input_ids=input_ids,
|
1537 |
+
attention_mask=attention_mask,
|
1538 |
+
head_mask=head_mask,
|
1539 |
+
inputs_embeds=inputs_embeds,
|
1540 |
+
output_attentions=output_attentions,
|
1541 |
+
output_hidden_states=output_hidden_states,
|
1542 |
+
output_router_logits=output_router_logits,
|
1543 |
+
return_dict=return_dict,
|
1544 |
+
)
|
1545 |
+
# If the user passed a tuple for encoder_outputs, we wrap it in a MoEModelOutput when return_dict=True
|
1546 |
+
elif return_dict and not isinstance(encoder_outputs, MoEModelOutput):
|
1547 |
+
encoder_outputs = MoEModelOutput(
|
1548 |
+
last_hidden_state=encoder_outputs[0],
|
1549 |
+
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
|
1550 |
+
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
|
1551 |
+
router_probs=encoder_outputs[3] if len(encoder_outputs) > 3 else None,
|
1552 |
+
)
|
1553 |
+
|
1554 |
+
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
|
1555 |
+
decoder_outputs = self.decoder(
|
1556 |
+
input_ids=decoder_input_ids,
|
1557 |
+
attention_mask=decoder_attention_mask,
|
1558 |
+
encoder_hidden_states=encoder_outputs[0],
|
1559 |
+
encoder_attention_mask=attention_mask,
|
1560 |
+
head_mask=decoder_head_mask,
|
1561 |
+
cross_attn_head_mask=cross_attn_head_mask,
|
1562 |
+
past_key_values=past_key_values,
|
1563 |
+
inputs_embeds=decoder_inputs_embeds,
|
1564 |
+
use_cache=use_cache,
|
1565 |
+
output_attentions=output_attentions,
|
1566 |
+
output_hidden_states=output_hidden_states,
|
1567 |
+
output_router_logits=output_router_logits,
|
1568 |
+
return_dict=return_dict,
|
1569 |
+
)
|
1570 |
+
|
1571 |
+
if not return_dict:
|
1572 |
+
return decoder_outputs + encoder_outputs
|
1573 |
+
|
1574 |
+
return Seq2SeqMoEModelOutput(
|
1575 |
+
past_key_values=decoder_outputs.past_key_values,
|
1576 |
+
cross_attentions=decoder_outputs.cross_attentions,
|
1577 |
+
last_hidden_state=decoder_outputs.last_hidden_state,
|
1578 |
+
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
|
1579 |
+
encoder_hidden_states=encoder_outputs.hidden_states,
|
1580 |
+
decoder_hidden_states=decoder_outputs.hidden_states,
|
1581 |
+
encoder_attentions=encoder_outputs.attentions,
|
1582 |
+
decoder_attentions=decoder_outputs.attentions,
|
1583 |
+
encoder_router_logits=encoder_outputs.router_probs,
|
1584 |
+
decoder_router_logits=decoder_outputs.router_probs,
|
1585 |
+
)
|
1586 |
+
|
1587 |
+
|
1588 |
+
@add_start_docstrings(
|
1589 |
+
"The NllbMoe Model with a language modeling head. Can be used for summarization.", NLLB_MOE_START_DOCSTRING
|
1590 |
+
)
|
1591 |
+
class NllbMoeForConditionalGeneration(NllbMoePreTrainedModel):
|
1592 |
+
base_model_prefix = "model"
|
1593 |
+
_tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]
|
1594 |
+
|
1595 |
+
def __init__(self, config: NllbMoeConfig):
|
1596 |
+
super().__init__(config)
|
1597 |
+
self.model = NllbMoeModel(config)
|
1598 |
+
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
|
1599 |
+
|
1600 |
+
self.router_z_loss_coef = config.router_z_loss_coef
|
1601 |
+
self.router_aux_loss_coef = config.router_aux_loss_coef
|
1602 |
+
# Initialize weights and apply final processing
|
1603 |
+
self.post_init()
|
1604 |
+
|
1605 |
+
def get_encoder(self):
|
1606 |
+
return self.model.get_encoder()
|
1607 |
+
|
1608 |
+
def get_decoder(self):
|
1609 |
+
return self.model.get_decoder()
|
1610 |
+
|
1611 |
+
def get_output_embeddings(self):
|
1612 |
+
return self.lm_head
|
1613 |
+
|
1614 |
+
def set_output_embeddings(self, new_embeddings):
|
1615 |
+
self.lm_head = new_embeddings
|
1616 |
+
|
1617 |
+
@add_start_docstrings_to_model_forward(NLLB_MOE_INPUTS_DOCSTRING)
|
1618 |
+
@replace_return_docstrings(output_type=Seq2SeqMoEOutput, config_class=_CONFIG_FOR_DOC)
|
1619 |
+
@add_end_docstrings(NLLB_MOE_GENERATION_EXAMPLE)
|
1620 |
+
def forward(
|
1621 |
+
self,
|
1622 |
+
input_ids: Optional[torch.LongTensor] = None,
|
1623 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1624 |
+
decoder_input_ids: Optional[torch.LongTensor] = None,
|
1625 |
+
decoder_attention_mask: Optional[torch.LongTensor] = None,
|
1626 |
+
head_mask: Optional[torch.Tensor] = None,
|
1627 |
+
decoder_head_mask: Optional[torch.Tensor] = None,
|
1628 |
+
cross_attn_head_mask: Optional[torch.Tensor] = None,
|
1629 |
+
encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
1630 |
+
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
1631 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1632 |
+
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
|
1633 |
+
labels: Optional[torch.LongTensor] = None,
|
1634 |
+
use_cache: Optional[bool] = None,
|
1635 |
+
output_attentions: Optional[bool] = None,
|
1636 |
+
output_hidden_states: Optional[bool] = None,
|
1637 |
+
output_router_logits: Optional[bool] = None,
|
1638 |
+
return_dict: Optional[bool] = None,
|
1639 |
+
) -> Union[Tuple[torch.Tensor], Seq2SeqMoEOutput]:
|
1640 |
+
r"""
|
1641 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
1642 |
+
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
1643 |
+
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
1644 |
+
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
1645 |
+
|
1646 |
+
Returns:
|
1647 |
+
"""
|
1648 |
+
return_dict = return_dict if return_dict is not None else self.config.return_dict
|
1649 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
1650 |
+
output_router_logits = (
|
1651 |
+
output_router_logits if output_router_logits is not None else self.config.output_router_logits
|
1652 |
+
)
|
1653 |
+
if labels is not None:
|
1654 |
+
if decoder_input_ids is None:
|
1655 |
+
decoder_input_ids = shift_tokens_right(
|
1656 |
+
labels, self.config.pad_token_id, self.config.decoder_start_token_id
|
1657 |
+
)
|
1658 |
+
|
1659 |
+
outputs = self.model(
|
1660 |
+
input_ids,
|
1661 |
+
attention_mask=attention_mask,
|
1662 |
+
decoder_input_ids=decoder_input_ids,
|
1663 |
+
encoder_outputs=encoder_outputs,
|
1664 |
+
decoder_attention_mask=decoder_attention_mask,
|
1665 |
+
head_mask=head_mask,
|
1666 |
+
decoder_head_mask=decoder_head_mask,
|
1667 |
+
cross_attn_head_mask=cross_attn_head_mask,
|
1668 |
+
past_key_values=past_key_values,
|
1669 |
+
inputs_embeds=inputs_embeds,
|
1670 |
+
decoder_inputs_embeds=decoder_inputs_embeds,
|
1671 |
+
use_cache=use_cache,
|
1672 |
+
output_attentions=output_attentions,
|
1673 |
+
output_hidden_states=output_hidden_states,
|
1674 |
+
output_router_logits=output_router_logits,
|
1675 |
+
return_dict=return_dict,
|
1676 |
+
)
|
1677 |
+
lm_logits = self.lm_head(outputs[0])
|
1678 |
+
|
1679 |
+
loss = None
|
1680 |
+
encoder_aux_loss = None
|
1681 |
+
decoder_aux_loss = None
|
1682 |
+
|
1683 |
+
if labels is not None:
|
1684 |
+
loss_fct = CrossEntropyLoss(ignore_index=-100)
|
1685 |
+
# TODO: check in the config whether the router loss is enabled
|
1686 |
+
|
1687 |
+
if output_router_logits:
|
1688 |
+
encoder_router_logits = outputs[-1]
|
1689 |
+
decoder_router_logits = outputs[3 if output_attentions else 4]
|
1690 |
+
|
1691 |
+
# Compute the router loss (z_loss + auxiliary loss) for each router in the encoder and decoder
|
1692 |
+
encoder_router_logits, encoder_expert_indexes = self._unpack_router_logits(encoder_router_logits)
|
1693 |
+
encoder_aux_loss = load_balancing_loss_func(encoder_router_logits, encoder_expert_indexes)
|
1694 |
+
|
1695 |
+
decoder_router_logits, decoder_expert_indexes = self._unpack_router_logits(decoder_router_logits)
|
1696 |
+
decoder_aux_loss = load_balancing_loss_func(decoder_router_logits, decoder_expert_indexes)
|
1697 |
+
|
1698 |
+
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
|
1699 |
+
|
1700 |
+
if output_router_logits and labels is not None:
|
1701 |
+
aux_loss = self.router_aux_loss_coef * (encoder_aux_loss + decoder_aux_loss)
|
1702 |
+
loss = loss + aux_loss
|
1703 |
+
|
1704 |
+
output = (loss,) if loss is not None else ()
|
1705 |
+
if not return_dict:
|
1706 |
+
output += (lm_logits,)
|
1707 |
+
if output_router_logits:  # include the aux losses only when router logits were requested
|
1708 |
+
output += (
|
1709 |
+
encoder_aux_loss,
|
1710 |
+
decoder_aux_loss,
|
1711 |
+
*outputs[1:],
|
1712 |
+
)
|
1713 |
+
else:
|
1714 |
+
output += outputs[1:]
|
1715 |
+
|
1716 |
+
return output
|
1717 |
+
|
1718 |
+
return Seq2SeqMoEOutput(
|
1719 |
+
loss=loss,
|
1720 |
+
logits=lm_logits,
|
1721 |
+
past_key_values=outputs.past_key_values,
|
1722 |
+
cross_attentions=outputs.cross_attentions,
|
1723 |
+
encoder_aux_loss=encoder_aux_loss,
|
1724 |
+
decoder_aux_loss=decoder_aux_loss,
|
1725 |
+
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
|
1726 |
+
encoder_hidden_states=outputs.encoder_hidden_states,
|
1727 |
+
decoder_hidden_states=outputs.decoder_hidden_states,
|
1728 |
+
encoder_attentions=outputs.encoder_attentions,
|
1729 |
+
decoder_attentions=outputs.decoder_attentions,
|
1730 |
+
encoder_router_logits=outputs.encoder_router_logits,
|
1731 |
+
decoder_router_logits=outputs.decoder_router_logits,
|
1732 |
+
)
|
1733 |
+
|
1734 |
+
def _unpack_router_logits(self, router_outputs):
|
1735 |
+
total_router_logits = []
|
1736 |
+
total_expert_indexes = []
|
1737 |
+
for router_output in router_outputs:
|
1738 |
+
if router_output is not None:
|
1739 |
+
router_logits, expert_indexes = router_output
|
1740 |
+
total_router_logits.append(router_logits)
|
1741 |
+
total_expert_indexes.append(expert_indexes)
|
1742 |
+
|
1743 |
+
total_router_logits = torch.cat(total_router_logits, dim=1) if len(total_router_logits) > 0 else None
|
1744 |
+
total_expert_indexes = torch.stack(total_expert_indexes, dim=1) if len(total_expert_indexes) > 0 else None
|
1745 |
+
return total_router_logits, total_expert_indexes
|
1746 |
+
|
1747 |
+
# Copied from transformers.models.switch_transformers.SwitchTransformersForConditionalGeneration.prepare_inputs_for_generation
|
1748 |
+
def prepare_inputs_for_generation(
|
1749 |
+
self,
|
1750 |
+
decoder_input_ids,
|
1751 |
+
past_key_values=None,
|
1752 |
+
attention_mask=None,
|
1753 |
+
head_mask=None,
|
1754 |
+
decoder_head_mask=None,
|
1755 |
+
cross_attn_head_mask=None,
|
1756 |
+
use_cache=None,
|
1757 |
+
encoder_outputs=None,
|
1758 |
+
**kwargs,
|
1759 |
+
):
|
1760 |
+
# cut decoder_input_ids if past is used
|
1761 |
+
if past_key_values is not None:
|
1762 |
+
past_length = past_key_values[0][0].shape[2]
|
1763 |
+
|
1764 |
+
# Some generation methods already pass only the last input ID
|
1765 |
+
if decoder_input_ids.shape[1] > past_length:
|
1766 |
+
remove_prefix_length = past_length
|
1767 |
+
else:
|
1768 |
+
# Default to old behavior: keep only final ID
|
1769 |
+
remove_prefix_length = decoder_input_ids.shape[1] - 1
|
1770 |
+
|
1771 |
+
decoder_input_ids = decoder_input_ids[:, remove_prefix_length:]
|
1772 |
+
|
1773 |
+
return {
|
1774 |
+
"input_ids": None, # encoder_outputs is defined. input_ids not needed
|
1775 |
+
"encoder_outputs": encoder_outputs,
|
1776 |
+
"past_key_values": past_key_values,
|
1777 |
+
"decoder_input_ids": decoder_input_ids,
|
1778 |
+
"attention_mask": attention_mask,
|
1779 |
+
"head_mask": head_mask,
|
1780 |
+
"decoder_head_mask": decoder_head_mask,
|
1781 |
+
"cross_attn_head_mask": cross_attn_head_mask,
|
1782 |
+
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
|
1783 |
+
}
|
1784 |
+
|
1785 |
+
@staticmethod
|
1786 |
+
def _reorder_cache(past_key_values, beam_idx):
|
1787 |
+
reordered_past = ()
|
1788 |
+
for layer_past in past_key_values:
|
1789 |
+
reordered_past += (
|
1790 |
+
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
|
1791 |
+
)
|
1792 |
+
return reordered_past
|
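Note on the sparse/dense layout used above: the encoder and decoder constructors decide per layer whether to use a dense `NllbMoeDenseActDense` or a sparse `NllbMoeSparseMLP` via `encoder_sparse_step` / `decoder_sparse_step`, marking layer `i` as sparse when `(i + 1) % sparse_step == 0` (and never sparse when `sparse_step <= 0`). A minimal sketch of that placement logic, with the layer count and step chosen only for illustration (not taken from the config defaults):

# Sketch (not part of the library): which layer indices end up with a sparse MoE FFN.
def sparse_layer_indices(num_layers: int, sparse_step: int) -> list:
    # Mirrors `is_sparse = (i + 1) % sparse_step == 0 if sparse_step > 0 else False` above.
    if sparse_step <= 0:
        return []
    return [i for i in range(num_layers) if (i + 1) % sparse_step == 0]

# Example: 12 layers with sparse_step=4 -> layers 3, 7 and 11 use NllbMoeSparseMLP,
# all other layers use NllbMoeDenseActDense.
assert sparse_layer_indices(12, 4) == [3, 7, 11]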
llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__init__.py
ADDED
@@ -0,0 +1,65 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_prophetnet": ["PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ProphetNetConfig"],
    "tokenization_prophetnet": ["ProphetNetTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_prophetnet"] = [
        "PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ProphetNetDecoder",
        "ProphetNetEncoder",
        "ProphetNetForCausalLM",
        "ProphetNetForConditionalGeneration",
        "ProphetNetModel",
        "ProphetNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig
    from .tokenization_prophetnet import ProphetNetTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_prophetnet import (
            PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ProphetNetDecoder,
            ProphetNetEncoder,
            ProphetNetForCausalLM,
            ProphetNetForConditionalGeneration,
            ProphetNetModel,
            ProphetNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
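As an aside from the diff itself, a minimal usage sketch of the lazy-import pattern above (assuming the transformers package from this environment is importable): submodules registered in _import_structure are only loaded the first time one of their attributes is accessed.

# Usage sketch, not part of the packaged file.
from transformers.models.prophetnet import ProphetNetConfig

# Accessing ProphetNetConfig triggers the lazy import of configuration_prophetnet;
# modeling_prophetnet stays unloaded until a model class is requested.
config = ProphetNetConfig()
print(config.model_type)  # "prophetnet"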
llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.14 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/configuration_prophetnet.cpython-310.pyc
ADDED
Binary file (7.86 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
ADDED
Binary file (3.73 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/modeling_prophetnet.cpython-310.pyc
ADDED
Binary file (78 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/tokenization_prophetnet.cpython-310.pyc
ADDED
Binary file (17.1 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/configuration_prophetnet.py
ADDED
@@ -0,0 +1,180 @@
# coding=utf-8
# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ProphetNet model configuration"""

from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class ProphetNetConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ProphetNetModel`]. It is used to instantiate a
    ProphetNet model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the ProphetNet
    [microsoft/prophetnet-large-uncased](https://huggingface.co/microsoft/prophetnet-large-uncased) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        activation_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for activations inside the fully connected layer.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the ProphetNET model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`ProphetNetModel`].
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        num_encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        num_encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the `intermediate` (often named feed-forward) layer in decoder.
        num_decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        num_decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        add_cross_attention (`bool`, *optional*, defaults to `True`):
            Whether cross-attention layers should be added to the model.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether this is an encoder/decoder model.
        pad_token_id (`int`, *optional*, defaults to 1)
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 0)
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2)
            End of stream token id.
        ngram (`int`, *optional*, defaults to 2)
            Number of future tokens to predict. Set to 1 to be same as traditional Language model to predict next first
            token.
        num_buckets (`int`, *optional*, defaults to 32)
            The number of buckets to use for each attention layer. This is for relative position calculation. See the
            [T5 paper](see https://arxiv.org/abs/1910.10683) for more details.
        relative_max_distance (`int`, *optional*, defaults to 128)
            Relative distances greater than this number will be put into the last same bucket. This is for relative
            position calculation. See the [T5 paper](see https://arxiv.org/abs/1910.10683) for more details.
        disable_ngram_loss (`bool`, *optional*, defaults to `False`):
            Whether be trained predicting only the next first token.
        eps (`float`, *optional*, defaults to 0.0):
            Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label
            smoothing is performed.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
    """

    model_type = "prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for prophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
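To make the defaults above concrete, here is a short illustrative sketch (again not part of the diff) showing how the configuration is instantiated and how the num_hidden_layers property sums encoder and decoder layers; it assumes the transformers package from this environment is importable.

# Illustrative sketch, not part of the packaged file.
from transformers import ProphetNetConfig

config = ProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6, ngram=2)
print(config.num_hidden_layers)  # 12: the property adds encoder and decoder layers
try:
    config.num_hidden_layers = 8  # the setter deliberately raises
except NotImplementedError as err:
    print(err)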