Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete list.
- llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/__init__.py +89 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/__pycache__/configuration_bridgetower.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/__pycache__/modeling_bridgetower.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/__pycache__/processing_bridgetower.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/configuration_bridgetower.py +349 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/image_processing_bridgetower.py +561 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/modeling_bridgetower.py +1898 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/processing_bridgetower.py +119 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/__init__.py +88 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/configuration_chinese_clip.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/convert_chinese_clip_original_pytorch_to_hf.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/feature_extraction_chinese_clip.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/image_processing_chinese_clip.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/modeling_chinese_clip.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/processing_chinese_clip.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/configuration_chinese_clip.py +468 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py +134 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/feature_extraction_chinese_clip.py +33 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/image_processing_chinese_clip.py +331 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/modeling_chinese_clip.py +1562 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/processing_chinese_clip.py +141 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/cpm/__init__.py +59 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/tokenization_cpm.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/tokenization_cpm_fast.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm.py +344 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm_fast.py +237 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/__init__.py +84 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/configuration_efficientnet.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/convert_efficientnet_to_pytorch.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/image_processing_efficientnet.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/modeling_efficientnet.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/configuration_efficientnet.py +169 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/convert_efficientnet_to_pytorch.py +339 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/image_processing_efficientnet.py +387 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/modeling_efficientnet.py +648 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/__init__.py +90 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/__pycache__/configuration_musicgen_melody.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/__pycache__/convert_musicgen_melody_transformers.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/__pycache__/feature_extraction_musicgen_melody.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/__pycache__/modeling_musicgen_melody.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/__pycache__/processing_musicgen_melody.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/configuration_musicgen_melody.py +271 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/convert_musicgen_melody_transformers.py +266 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/modeling_musicgen_melody.py +0 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/processing_musicgen_melody.py +174 -0
- llmeval-env/lib/python3.10/site-packages/transformers/models/owlv2/__init__.py +93 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/__init__.py
ADDED
@@ -0,0 +1,89 @@
# Copyright 2023 The Intel Labs Team Authors, The Microsoft Research Team Authors and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )


else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
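This `__init__.py` follows the transformers lazy-module pattern: importing the subpackage only registers `_import_structure`, and the torch- and vision-heavy submodules are loaded on first attribute access. A minimal sketch of what that looks like from user code, assuming a transformers installation that ships these BridgeTower exports:

import importlib

# Importing the subpackage is cheap: _LazyModule defers the real imports.
bridgetower = importlib.import_module("transformers.models.bridgetower")

# First attribute access triggers the import of configuration_bridgetower only.
BridgeTowerConfig = bridgetower.BridgeTowerConfig
config = BridgeTowerConfig()
print(config.model_type)  # "bridgetower"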
llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/__pycache__/configuration_bridgetower.cpython-310.pyc
ADDED
Binary file (13.9 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/__pycache__/modeling_bridgetower.cpython-310.pyc
ADDED
Binary file (58.5 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/__pycache__/processing_bridgetower.cpython-310.pyc
ADDED
Binary file (4.23 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/configuration_bridgetower.py
ADDED
@@ -0,0 +1,349 @@
# coding=utf-8
# Copyright 2023 The Intel Labs Team Authors, The Microsoft Research Team Authors and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BridgeTower model configuration"""

import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class BridgeTowerVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the vision configuration of a [`BridgeTowerModel`]. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the bridgetower-base
    [BridgeTower/bridgetower-base](https://huggingface.co/BridgeTower/bridgetower-base/) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in visual encoder model.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        image_size (`int`, *optional*, defaults to 288):
            The size (resolution) of each image.
        initializer_factor (`float`, *optional*, defaults to 1):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        stop_gradient (`bool`, *optional*, defaults to `False`):
            Whether to stop gradient for training.
        share_layernorm (`bool`, *optional*, defaults to `True`):
            Whether LayerNorm layers are shared.
        remove_last_layer (`bool`, *optional*, defaults to `False`):
            Whether to remove the last layer from the vision encoder.


    Example:

    ```python
    >>> from transformers import BridgeTowerVisionConfig

    >>> # Initializing a BridgeTower BridgeTower/bridgetower-base style configuration for the vision model
    >>> configuration = BridgeTowerVisionConfig()

    >>> # Accessing the configuration
    >>> configuration
    ```"""

    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the text configuration of a [`BridgeTowerModel`]. The default values here
    are copied from RoBERTa. Instantiating a configuration with the defaults will yield a similar configuration to that
    of the bridgetower-base [BridgeTower/bridgetower-base](https://huggingface.co/BridgeTower/bridgetower-base/)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50265):
            Vocabulary size of the text part of the model. Defines the number of different tokens that can be
            represented by the `inputs_ids` passed when calling [`BridgeTowerModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 514):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids`.
        initializer_factor (`float`, *optional*, defaults to 1):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.

    Example:

    ```python
    >>> from transformers import BridgeTowerTextConfig

    >>> # Initializing a BridgeTower BridgeTower/bridgetower-base style configuration for the text model
    >>> configuration = BridgeTowerTextConfig()

    >>> # Accessing the configuration
    >>> configuration
    ```"""

    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`BridgeTowerModel`]. It is used to instantiate a
    BridgeTower model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the bridgetower-base
    [BridgeTower/bridgetower-base](https://huggingface.co/BridgeTower/bridgetower-base/) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        share_cross_modal_transformer_layers (`bool`, *optional*, defaults to `True`):
            Whether cross modal transformer layers are shared.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler.
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        initializer_factor (`float`, *optional*, defaults to 1):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        share_link_tower_layers (`bool`, *optional*, defaults to `False`):
            Whether the bridge/link tower layers are shared.
        link_tower_type (`str`, *optional*, defaults to `"add"`):
            Type of the bridge/link layer.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 6):
            Number of hidden layers in the Transformer encoder.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie input and output embeddings.
        init_layernorm_from_vision_encoder (`bool`, *optional*, defaults to `False`):
            Whether to init LayerNorm from the vision encoder.
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`BridgeTowerTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`BridgeTowerVisionConfig`].

    Example:

    ```python
    >>> from transformers import BridgeTowerModel, BridgeTowerConfig

    >>> # Initializing a BridgeTower BridgeTower/bridgetower-base style configuration
    >>> configuration = BridgeTowerConfig()

    >>> # Initializing a model from the BridgeTower/bridgetower-base style configuration
    >>> model = BridgeTowerModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        r"""
        Instantiate a [`BridgeTowerConfig`] (or a derived class) from BridgeTower text model configuration. Returns:
            [`BridgeTowerConfig`]: An instance of a configuration object
        """

        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
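The three classes above compose: `BridgeTowerConfig` nests a text and a vision config and re-instantiates them from dicts in its `__init__`, and `from_text_vision_configs` builds the combined object from two sub-config instances. A short sketch of that composition using only default-style values (no checkpoint download); it assumes the classes are importable from the top-level `transformers` package, as the lazy `__init__.py` above exports them:

from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig

# Build the sub-configs explicitly, then combine them into one model config.
text_config = BridgeTowerTextConfig(hidden_size=768, num_hidden_layers=12)
vision_config = BridgeTowerVisionConfig(hidden_size=768, patch_size=16, image_size=288)
config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)

# The nested configs are stored as config objects, not plain dicts.
print(type(config.text_config).__name__)  # BridgeTowerTextConfig
print(config.vision_config.image_size)    # 288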
llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/image_processing_bridgetower.py
ADDED
@@ -0,0 +1,561 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2023 The Intel Labs Team Authors, The Microsoft Research Team Authors and HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
"""Image processor class for BridgeTower."""
|
16 |
+
|
17 |
+
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
|
18 |
+
|
19 |
+
import numpy as np
|
20 |
+
|
21 |
+
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
|
22 |
+
from ...image_transforms import PaddingMode, center_crop, pad, resize, to_channel_dimension_format
|
23 |
+
from ...image_utils import (
|
24 |
+
OPENAI_CLIP_MEAN,
|
25 |
+
OPENAI_CLIP_STD,
|
26 |
+
ChannelDimension,
|
27 |
+
ImageInput,
|
28 |
+
PILImageResampling,
|
29 |
+
get_image_size,
|
30 |
+
infer_channel_dimension_format,
|
31 |
+
is_batched,
|
32 |
+
is_scaled_image,
|
33 |
+
to_numpy_array,
|
34 |
+
valid_images,
|
35 |
+
validate_kwargs,
|
36 |
+
validate_preprocess_arguments,
|
37 |
+
)
|
38 |
+
from ...utils import TensorType, is_vision_available, logging
|
39 |
+
|
40 |
+
|
41 |
+
if is_vision_available():
|
42 |
+
import PIL
|
43 |
+
|
44 |
+
logger = logging.get_logger(__name__)
|
45 |
+
|
46 |
+
|
47 |
+
# Copied from transformers.models.vilt.image_processing_vilt.max_across_indices
|
48 |
+
def max_across_indices(values: Iterable[Any]) -> List[Any]:
|
49 |
+
"""
|
50 |
+
Return the maximum value across all indices of an iterable of values.
|
51 |
+
"""
|
52 |
+
return [max(values_i) for values_i in zip(*values)]
|
53 |
+
|
54 |
+
|
55 |
+
# Copied from transformers.models.vilt.image_processing_vilt.make_pixel_mask
|
56 |
+
def make_pixel_mask(
|
57 |
+
image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
|
58 |
+
) -> np.ndarray:
|
59 |
+
"""
|
60 |
+
Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
|
61 |
+
|
62 |
+
Args:
|
63 |
+
image (`np.ndarray`):
|
64 |
+
Image to make the pixel mask for.
|
65 |
+
output_size (`Tuple[int, int]`):
|
66 |
+
Output size of the mask.
|
67 |
+
"""
|
68 |
+
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
|
69 |
+
mask = np.zeros(output_size, dtype=np.int64)
|
70 |
+
mask[:input_height, :input_width] = 1
|
71 |
+
return mask
|
72 |
+
|
73 |
+
|
74 |
+
# Copied from transformers.models.vilt.image_processing_vilt.get_max_height_width
|
75 |
+
def get_max_height_width(
|
76 |
+
images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
|
77 |
+
) -> List[int]:
|
78 |
+
"""
|
79 |
+
Get the maximum height and width across all images in a batch.
|
80 |
+
"""
|
81 |
+
if input_data_format is None:
|
82 |
+
input_data_format = infer_channel_dimension_format(images[0])
|
83 |
+
|
84 |
+
if input_data_format == ChannelDimension.FIRST:
|
85 |
+
_, max_height, max_width = max_across_indices([img.shape for img in images])
|
86 |
+
elif input_data_format == ChannelDimension.LAST:
|
87 |
+
max_height, max_width, _ = max_across_indices([img.shape for img in images])
|
88 |
+
else:
|
89 |
+
raise ValueError(f"Invalid channel dimension format: {input_data_format}")
|
90 |
+
return (max_height, max_width)
|
91 |
+
|
92 |
+
|
93 |
+
# Copied from transformers.models.vilt.image_processing_vilt.get_resize_output_image_size
|
94 |
+
def get_resize_output_image_size(
|
95 |
+
input_image: np.ndarray,
|
96 |
+
shorter: int = 800,
|
97 |
+
longer: int = 1333,
|
98 |
+
size_divisor: int = 32,
|
99 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
100 |
+
) -> Tuple[int, int]:
|
101 |
+
input_height, input_width = get_image_size(input_image, input_data_format)
|
102 |
+
min_size, max_size = shorter, longer
|
103 |
+
|
104 |
+
scale = min_size / min(input_height, input_width)
|
105 |
+
|
106 |
+
if input_height < input_width:
|
107 |
+
new_height = min_size
|
108 |
+
new_width = scale * input_width
|
109 |
+
else:
|
110 |
+
new_height = scale * input_height
|
111 |
+
new_width = min_size
|
112 |
+
|
113 |
+
if max(new_height, new_width) > max_size:
|
114 |
+
scale = max_size / max(new_height, new_width)
|
115 |
+
new_height = scale * new_height
|
116 |
+
new_width = scale * new_width
|
117 |
+
|
118 |
+
new_height, new_width = int(new_height + 0.5), int(new_width + 0.5)
|
119 |
+
new_height = new_height // size_divisor * size_divisor
|
120 |
+
new_width = new_width // size_divisor * size_divisor
|
121 |
+
|
122 |
+
return new_height, new_width
|
123 |
+
|
124 |
+
|
125 |
+
class BridgeTowerImageProcessor(BaseImageProcessor):
|
126 |
+
r"""
|
127 |
+
Constructs a BridgeTower image processor.
|
128 |
+
|
129 |
+
Args:
|
130 |
+
do_resize (`bool`, *optional*, defaults to `True`):
|
131 |
+
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
|
132 |
+
`do_resize` parameter in the `preprocess` method.
|
133 |
+
size (`Dict[str, int]` *optional*, defaults to `{'shortest_edge': 288}`):
|
134 |
+
Resize the shorter side of the input to `size["shortest_edge"]`. The longer side will be limited to under
|
135 |
+
`int((1333 / 800) * size["shortest_edge"])` while preserving the aspect ratio. Only has an effect if
|
136 |
+
`do_resize` is set to `True`. Can be overridden by the `size` parameter in the `preprocess` method.
|
137 |
+
size_divisor (`int`, *optional*, defaults to 32):
|
138 |
+
The size by which to make sure both the height and width can be divided. Only has an effect if `do_resize`
|
139 |
+
is set to `True`. Can be overridden by the `size_divisor` parameter in the `preprocess` method.
|
140 |
+
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
|
141 |
+
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
|
142 |
+
overridden by the `resample` parameter in the `preprocess` method.
|
143 |
+
do_rescale (`bool`, *optional*, defaults to `True`):
|
144 |
+
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
|
145 |
+
parameter in the `preprocess` method.
|
146 |
+
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
|
147 |
+
Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
|
148 |
+
overridden by the `rescale_factor` parameter in the `preprocess` method.
|
149 |
+
do_normalize (`bool`, *optional*, defaults to `True`):
|
150 |
+
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
|
151 |
+
method. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
|
152 |
+
image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
|
153 |
+
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
|
154 |
+
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
|
155 |
+
overridden by the `image_mean` parameter in the `preprocess` method.
|
156 |
+
image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
|
157 |
+
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
|
158 |
+
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
|
159 |
+
Can be overridden by the `image_std` parameter in the `preprocess` method.
|
160 |
+
do_center_crop (`bool`, *optional*, defaults to `True`):
|
161 |
+
Whether to center crop the image. Can be overridden by the `do_center_crop` parameter in the `preprocess`
|
162 |
+
method.
|
163 |
+
crop_size (`Dict[str, int]`, *optional*):
|
164 |
+
Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
|
165 |
+
Can be overridden by the `crop_size` parameter in the `preprocess` method. If unset defaults to `size`,
|
166 |
+
do_pad (`bool`, *optional*, defaults to `True`):
|
167 |
+
Whether to pad the image to the `(max_height, max_width)` of the images in the batch. Can be overridden by
|
168 |
+
the `do_pad` parameter in the `preprocess` method.
|
169 |
+
"""
|
170 |
+
|
171 |
+
model_input_names = ["pixel_values"]
|
172 |
+
|
173 |
+
def __init__(
|
174 |
+
self,
|
175 |
+
do_resize: bool = True,
|
176 |
+
size: Dict[str, int] = None,
|
177 |
+
size_divisor: int = 32,
|
178 |
+
resample: PILImageResampling = PILImageResampling.BICUBIC,
|
179 |
+
do_rescale: bool = True,
|
180 |
+
rescale_factor: Union[int, float] = 1 / 255,
|
181 |
+
do_normalize: bool = True,
|
182 |
+
image_mean: Optional[Union[float, List[float]]] = None,
|
183 |
+
image_std: Optional[Union[float, List[float]]] = None,
|
184 |
+
do_center_crop: bool = True,
|
185 |
+
crop_size: Dict[str, int] = None,
|
186 |
+
do_pad: bool = True,
|
187 |
+
**kwargs,
|
188 |
+
) -> None:
|
189 |
+
if "pad_and_return_pixel_mask" in kwargs:
|
190 |
+
do_pad = kwargs.pop("pad_and_return_pixel_mask")
|
191 |
+
|
192 |
+
super().__init__(**kwargs)
|
193 |
+
size = size if size is not None else {"shortest_edge": 288}
|
194 |
+
size = get_size_dict(size, default_to_square=False)
|
195 |
+
|
196 |
+
self.do_resize = do_resize
|
197 |
+
self.size = size
|
198 |
+
self.size_divisor = size_divisor
|
199 |
+
self.resample = resample
|
200 |
+
self.do_rescale = do_rescale
|
201 |
+
self.rescale_factor = rescale_factor
|
202 |
+
self.do_normalize = do_normalize
|
203 |
+
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
|
204 |
+
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
|
205 |
+
self.do_pad = do_pad
|
206 |
+
self.do_center_crop = do_center_crop
|
207 |
+
self.crop_size = crop_size
|
208 |
+
self._valid_processor_keys = [
|
209 |
+
"images",
|
210 |
+
"do_resize",
|
211 |
+
"size",
|
212 |
+
"size_divisor",
|
213 |
+
"resample",
|
214 |
+
"do_rescale",
|
215 |
+
"rescale_factor",
|
216 |
+
"do_normalize",
|
217 |
+
"image_mean",
|
218 |
+
"image_std",
|
219 |
+
"do_pad",
|
220 |
+
"do_center_crop",
|
221 |
+
"crop_size",
|
222 |
+
"return_tensors",
|
223 |
+
"data_format",
|
224 |
+
"input_data_format",
|
225 |
+
]
|
226 |
+
|
227 |
+
# Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor.resize
|
228 |
+
def resize(
|
229 |
+
self,
|
230 |
+
image: np.ndarray,
|
231 |
+
size: Dict[str, int],
|
232 |
+
size_divisor: int = 32,
|
233 |
+
resample: PILImageResampling = PILImageResampling.BICUBIC,
|
234 |
+
data_format: Optional[Union[str, ChannelDimension]] = None,
|
235 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
236 |
+
**kwargs,
|
237 |
+
) -> np.ndarray:
|
238 |
+
"""
|
239 |
+
Resize an image.
|
240 |
+
|
241 |
+
Resizes the shorter side of the image to `size["shortest_edge"]` while preserving the aspect ratio. If the
|
242 |
+
longer side is larger than the max size `(int(`size["shortest_edge"]` * 1333 / 800))`, the longer side is then
|
243 |
+
resized to the max size while preserving the aspect ratio.
|
244 |
+
|
245 |
+
Args:
|
246 |
+
image (`np.ndarray`):
|
247 |
+
Image to resize.
|
248 |
+
size (`Dict[str, int]`):
|
249 |
+
Controls the size of the output image. Should be of the form `{"shortest_edge": int}`.
|
250 |
+
size_divisor (`int`, defaults to 32):
|
251 |
+
The image is resized to a size that is a multiple of this value.
|
252 |
+
resample (`PILImageResampling` filter, *optional*, defaults to `PILImageResampling.BICUBIC`):
|
253 |
+
Resampling filter to use when resiizing the image.
|
254 |
+
data_format (`str` or `ChannelDimension`, *optional*):
|
255 |
+
The channel dimension format of the image. If not provided, it will be the same as the input image.
|
256 |
+
input_data_format (`str` or `ChannelDimension`, *optional*):
|
257 |
+
The channel dimension format of the input image. If not provided, it will be inferred.
|
258 |
+
"""
|
259 |
+
size = get_size_dict(size, default_to_square=False)
|
260 |
+
if "shortest_edge" not in size:
|
261 |
+
raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
|
262 |
+
shorter = size["shortest_edge"]
|
263 |
+
longer = int(1333 / 800 * shorter)
|
264 |
+
output_size = get_resize_output_image_size(
|
265 |
+
image, shorter=shorter, longer=longer, size_divisor=size_divisor, input_data_format=input_data_format
|
266 |
+
)
|
267 |
+
return resize(
|
268 |
+
image,
|
269 |
+
size=output_size,
|
270 |
+
resample=resample,
|
271 |
+
data_format=data_format,
|
272 |
+
input_data_format=input_data_format,
|
273 |
+
**kwargs,
|
274 |
+
)
|
275 |
+
|
276 |
+
def center_crop(
|
277 |
+
self,
|
278 |
+
image: np.ndarray,
|
279 |
+
size: Dict[str, int],
|
280 |
+
data_format: Optional[Union[str, ChannelDimension]] = None,
|
281 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
282 |
+
**kwargs,
|
283 |
+
) -> np.ndarray:
|
284 |
+
"""
|
285 |
+
Center crop an image to `(size["height"], size["width"])`. If the input size is smaller than `crop_size` along
|
286 |
+
any edge, the image is padded with 0's and then center cropped.
|
287 |
+
|
288 |
+
Args:
|
289 |
+
image (`np.ndarray`):
|
290 |
+
Image to center crop.
|
291 |
+
size (`Dict[str, int]`):
|
292 |
+
Size of the output image in the form `{"height": h, "width": w}`.
|
293 |
+
data_format (`str` or `ChannelDimension`, *optional*):
|
294 |
+
The channel dimension format of the image. If not provided, it will be the same as the input image.
|
295 |
+
input_data_format (`ChannelDimension` or `str`, *optional*):
|
296 |
+
The channel dimension format of the input image. If not provided, it will be inferred from the input
|
297 |
+
image.
|
298 |
+
"""
|
299 |
+
output_size = size["shortest_edge"]
|
300 |
+
return center_crop(
|
301 |
+
image,
|
302 |
+
size=(output_size, output_size),
|
303 |
+
data_format=data_format,
|
304 |
+
input_data_format=input_data_format,
|
305 |
+
**kwargs,
|
306 |
+
)
|
307 |
+
|
308 |
+
# Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor._pad_image
|
309 |
+
def _pad_image(
|
310 |
+
self,
|
311 |
+
image: np.ndarray,
|
312 |
+
output_size: Tuple[int, int],
|
313 |
+
constant_values: Union[float, Iterable[float]] = 0,
|
314 |
+
data_format: Optional[ChannelDimension] = None,
|
315 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
316 |
+
) -> np.ndarray:
|
317 |
+
"""
|
318 |
+
Pad an image with zeros to the given size.
|
319 |
+
"""
|
320 |
+
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
|
321 |
+
output_height, output_width = output_size
|
322 |
+
|
323 |
+
pad_bottom = output_height - input_height
|
324 |
+
pad_right = output_width - input_width
|
325 |
+
padding = ((0, pad_bottom), (0, pad_right))
|
326 |
+
padded_image = pad(
|
327 |
+
image,
|
328 |
+
padding,
|
329 |
+
mode=PaddingMode.CONSTANT,
|
330 |
+
constant_values=constant_values,
|
331 |
+
data_format=data_format,
|
332 |
+
input_data_format=input_data_format,
|
333 |
+
)
|
334 |
+
return padded_image
|
335 |
+
|
336 |
+
# Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor.pad
|
337 |
+
def pad(
|
338 |
+
self,
|
339 |
+
images: List[np.ndarray],
|
340 |
+
constant_values: Union[float, Iterable[float]] = 0,
|
341 |
+
return_pixel_mask: bool = True,
|
342 |
+
return_tensors: Optional[Union[str, TensorType]] = None,
|
343 |
+
data_format: Optional[ChannelDimension] = None,
|
344 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
345 |
+
) -> BatchFeature:
|
346 |
+
"""
|
347 |
+
Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
|
348 |
+
in the batch and optionally returns their corresponding pixel mask.
|
349 |
+
|
350 |
+
Args:
|
351 |
+
image (`np.ndarray`):
|
352 |
+
Image to pad.
|
353 |
+
constant_values (`float` or `Iterable[float]`, *optional*):
|
354 |
+
The value to use for the padding if `mode` is `"constant"`.
|
355 |
+
return_pixel_mask (`bool`, *optional*, defaults to `True`):
|
356 |
+
Whether to return a pixel mask.
|
357 |
+
return_tensors (`str` or `TensorType`, *optional*):
|
358 |
+
The type of tensors to return. Can be one of:
|
359 |
+
- Unset: Return a list of `np.ndarray`.
|
360 |
+
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
|
361 |
+
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
|
362 |
+
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
|
363 |
+
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
|
364 |
+
data_format (`str` or `ChannelDimension`, *optional*):
|
365 |
+
The channel dimension format of the image. If not provided, it will be the same as the input image.
|
366 |
+
input_data_format (`ChannelDimension` or `str`, *optional*):
|
367 |
+
The channel dimension format of the input image. If not provided, it will be inferred.
|
368 |
+
"""
|
369 |
+
pad_size = get_max_height_width(images, input_data_format=input_data_format)
|
370 |
+
|
371 |
+
padded_images = [
|
372 |
+
self._pad_image(
|
373 |
+
image,
|
374 |
+
pad_size,
|
375 |
+
constant_values=constant_values,
|
376 |
+
data_format=data_format,
|
377 |
+
input_data_format=input_data_format,
|
378 |
+
)
|
379 |
+
for image in images
|
380 |
+
]
|
381 |
+
data = {"pixel_values": padded_images}
|
382 |
+
|
383 |
+
if return_pixel_mask:
|
384 |
+
masks = [
|
385 |
+
make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format)
|
386 |
+
for image in images
|
387 |
+
]
|
388 |
+
data["pixel_mask"] = masks
|
389 |
+
|
390 |
+
return BatchFeature(data=data, tensor_type=return_tensors)
|
391 |
+
|
392 |
+
def preprocess(
|
393 |
+
self,
|
394 |
+
images: ImageInput,
|
395 |
+
do_resize: Optional[bool] = None,
|
396 |
+
size: Optional[Dict[str, int]] = None,
|
397 |
+
size_divisor: Optional[int] = None,
|
398 |
+
resample: PILImageResampling = None,
|
399 |
+
do_rescale: Optional[bool] = None,
|
400 |
+
rescale_factor: Optional[float] = None,
|
401 |
+
do_normalize: Optional[bool] = None,
|
402 |
+
image_mean: Optional[Union[float, List[float]]] = None,
|
403 |
+
image_std: Optional[Union[float, List[float]]] = None,
|
404 |
+
do_pad: Optional[bool] = None,
|
405 |
+
do_center_crop: Optional[bool] = None,
|
406 |
+
crop_size: Dict[str, int] = None,
|
407 |
+
return_tensors: Optional[Union[str, TensorType]] = None,
|
408 |
+
data_format: ChannelDimension = ChannelDimension.FIRST,
|
409 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
410 |
+
**kwargs,
|
411 |
+
) -> PIL.Image.Image:
|
412 |
+
"""
|
413 |
+
Preprocess an image or batch of images.
|
414 |
+
|
415 |
+
Args:
|
416 |
+
images (`ImageInput`):
|
417 |
+
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
|
418 |
+
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
|
419 |
+
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
|
420 |
+
Whether to resize the image.
|
421 |
+
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
|
422 |
+
Controls the size of the image after `resize`. The shortest edge of the image is resized to
|
423 |
+
`size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image
|
424 |
+
is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest
|
425 |
+
edge equal to `int(size["shortest_edge"] * (1333 / 800))`.
|
426 |
+
size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
|
427 |
+
The image is resized to a size that is a multiple of this value.
|
428 |
+
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
|
429 |
+
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
|
430 |
+
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
|
431 |
+
Whether to rescale the image values between [0 - 1].
|
432 |
+
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
|
433 |
+
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
|
434 |
+
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
|
435 |
+
Whether to normalize the image.
|
436 |
+
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
|
437 |
+
Image mean to normalize the image by if `do_normalize` is set to `True`.
|
438 |
+
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
|
439 |
+
Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
|
440 |
+
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
|
441 |
+
Whether to pad the image to the (max_height, max_width) in the batch. If `True`, a pixel mask is also
|
442 |
+
created and returned.
|
443 |
+
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
|
444 |
+
Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the
|
445 |
+
image is padded with 0's and then center cropped.
|
446 |
+
crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
|
447 |
+
Size of the image after center crop. If one edge the image is smaller than `crop_size`, it will be
|
448 |
+
padded with zeros and then cropped
|
449 |
+
return_tensors (`str` or `TensorType`, *optional*):
|
450 |
+
The type of tensors to return. Can be one of:
|
451 |
+
- Unset: Return a list of `np.ndarray`.
|
452 |
+
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
|
453 |
+
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
|
454 |
+
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
|
455 |
+
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
|
456 |
+
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
|
457 |
+
The channel dimension format for the output image. Can be one of:
|
458 |
+
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
459 |
+
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
460 |
+
- Unset: Use the channel dimension format of the input image.
|
461 |
+
input_data_format (`ChannelDimension` or `str`, *optional*):
|
462 |
+
The channel dimension format for the input image. If unset, the channel dimension format is inferred
|
463 |
+
from the input image. Can be one of:
|
464 |
+
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
465 |
+
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
466 |
+
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
|
467 |
+
"""
|
468 |
+
        do_resize = do_resize if do_resize is not None else self.do_resize
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_pad = do_pad if do_pad is not None else self.do_pad
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        # For backwards compatibility. Initial version of this processor was cropping to the "size" argument, which
        # it should default to if crop_size is undefined.
        crop_size = (
            crop_size if crop_size is not None else (self.crop_size if self.crop_size is not None else self.size)
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        # Here, crop_size is used only if it is set, else size will be used.
        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_pad=do_pad,
            size_divisibility=size_divisor,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if do_resize:
            images = [
                self.resize(
                    image=image,
                    size=size,
                    size_divisor=size_divisor,
                    resample=resample,
                    input_data_format=input_data_format,
                )
                for image in images
            ]

        if do_center_crop:
            images = [
                self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
            ]

        if do_rescale:
            images = [
                self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
                for image in images
            ]

        if do_normalize:
            images = [
                self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
                for image in images
            ]

        images = [
            to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
        ]

        if do_pad:
            encoded_outputs = self.pad(
                images, return_pixel_mask=True, return_tensors=return_tensors, input_data_format=data_format
            )
        else:
            encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        return encoded_outputs
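
# Illustrative usage sketch (editorial addition, not part of the original module): how the
# `preprocess` method above can be called on a single image. The dummy image and the chosen
# arguments are assumptions for the example only.
def _example_preprocess_call():
    """Minimal sketch: run `preprocess` on one dummy image and return what it produces."""
    import numpy as np
    from PIL import Image

    image_processor = BridgeTowerImageProcessor()
    # A flat gray 300x200 RGB image stands in for real data.
    image = Image.fromarray(np.full((200, 300, 3), 128, dtype=np.uint8))
    encoding = image_processor.preprocess(image, return_tensors="pt")
    # When `do_pad` is enabled, the output contains both "pixel_values" and "pixel_mask".
    return encoding["pixel_values"].shape, sorted(encoding.keys())
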
llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/modeling_bridgetower.py
ADDED
@@ -0,0 +1,1898 @@
# coding=utf-8
# Copyright 2023 The Intel Labs Team Authors, The Microsoft Research Team Authors and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BridgeTower Model"""

import math
from collections import OrderedDict
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN, QuickGELUActivation
from ...modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    MaskedLMOutput,
    ModelOutput,
    SequenceClassifierOutput,
)
from ...modeling_utils import PreTrainedModel, apply_chunking_to_forward
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_bridgetower import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "BridgeTowerConfig"
_CHECKPOINT_FOR_DOC = "BridgeTower/bridgetower-base"
_TOKENIZER_FOR_DOC = "RobertaTokenizer"


from ..deprecated._archive_maps import BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


BRIDGETOWER_START_DOCSTRING = r"""
    This model is a PyTorch [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
    behavior.

    Parameters:
        config ([`BridgeTowerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

BRIDGETOWER_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids)

        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
            [What are attention masks?](../glossary#attention-mask)

        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:
            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.
            [What are token type IDs?](../glossary#token-type-ids)

        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`BridgeTowerImageProcessor`]. See
            [`BridgeTowerImageProcessor.__call__`] for details.

        pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
            Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:

            - 1 for pixels that are real (i.e. **not masked**),
            - 0 for pixels that are padding (i.e. **masked**).
            [What are attention masks?](../glossary#attention-mask)

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.

        image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):
            Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `pixel_values` into patch embeddings.

        image_token_type_idx (`int`, *optional*):
            - The token type ids for images.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

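
# Editorial sketch (not part of the original module): the docstrings above note that building a
# model from a configuration object gives randomly initialized weights; `from_pretrained` is what
# loads trained weights. The checkpoint name mirrors `_CHECKPOINT_FOR_DOC` above.
def _example_init_from_config():
    """Minimal sketch: construct a randomly initialized BridgeTowerModel from its configuration."""
    config = BridgeTowerConfig()  # default hyper-parameters, nothing downloaded
    model = BridgeTowerModel(config)  # random initialization only
    # model = BridgeTowerModel.from_pretrained("BridgeTower/bridgetower-base")  # trained weights
    return model
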
@dataclass
class BridgeTowerModelOutput(ModelOutput):
    """
    Output type of [`BridgeTowerModel`].

    Args:
        text_features (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, hidden_size)`):
            Sequence of hidden-states at the text output of the last layer of the model.
        image_features (`torch.FloatTensor` of shape `(batch_size, image_sequence_length, hidden_size)`):
            Sequence of hidden-states at the image output of the last layer of the model.
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size x 2)`):
            Concatenation of last layer hidden-state of the first token of the text and image sequence (classification
            token), respectively, after further processing through layers used for auxiliary pretraining tasks.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of
            the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    text_features: torch.FloatTensor = None
    image_features: torch.FloatTensor = None
    pooler_output: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class BridgeTowerContrastiveOutput(ModelOutput):
    """
    Output type of [`BridgeTowerForContrastiveLearning`].

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Image-text contrastive loss.
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        text_embeds (`torch.FloatTensor`, *optional*, returned when model is initialized with `with_projection=True`):
            The text embeddings obtained by applying the projection layer to the pooler_output.
        image_embeds (`torch.FloatTensor`, *optional*, returned when model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        cross_embeds (`torch.FloatTensor`, *optional*, returned when model is initialized with `with_projection=True`):
            The text-image cross-modal embeddings obtained by applying the projection layer to the pooler_output.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of
            the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    text_embeds: Optional[Tuple[torch.FloatTensor]] = None
    image_embeds: Optional[Tuple[torch.FloatTensor]] = None
    cross_embeds: Optional[Tuple[torch.FloatTensor]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None

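
# Editorial sketch (not part of the original module): as described in the docstring above,
# `pooler_output` concatenates the pooled text state and the pooled image state, so its last
# dimension is twice the hidden size. The hidden size below is an illustrative value only.
def _example_pooler_output_width(hidden_size: int = 768) -> int:
    """Minimal sketch of why `pooler_output` has shape `(batch_size, hidden_size x 2)`."""
    text_pooled_width = hidden_size  # pooled first-token state of the text tower
    image_pooled_width = hidden_size  # pooled first-token state of the image tower
    return text_pooled_width + image_pooled_width
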
class BridgeTowerResidualAttention(nn.Module):
    def __init__(self, config):
        super().__init__()

        self.attn = nn.MultiheadAttention(config.hidden_size, config.hidden_size // 64)
        self.ln_1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.mlp = nn.ModuleDict(
            OrderedDict(
                [
                    ("c_fc", nn.Linear(config.hidden_size, config.hidden_size * 4)),
                    ("gelu", QuickGELUActivation()),
                    ("c_proj", nn.Linear(config.hidden_size * 4, config.hidden_size)),
                ]
            )
        )
        self.ln_2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.attn_mask = None

    def attention(self, hidden_state: torch.Tensor, attention_mask: torch.Tensor):
        if attention_mask is not None:
            attention_mask = attention_mask.to(dtype=torch.bool, device=hidden_state.device)
        self.attn_mask = (
            self.attn_mask.to(dtype=hidden_state.dtype, device=hidden_state.device)
            if self.attn_mask is not None
            else None
        )
        return self.attn(
            hidden_state,
            hidden_state,
            hidden_state,
            need_weights=False,
            attn_mask=self.attn_mask,
            key_padding_mask=attention_mask,
        )[0]

    def forward(self, hidden_state: torch.Tensor, attention_mask: torch.Tensor = None):
        residual_state = hidden_state + self.attention(self.ln_1(hidden_state), attention_mask)
        hidden_state = self.ln_2(residual_state)
        for _, layer in self.mlp.items():
            hidden_state = layer(hidden_state)
        hidden_state = residual_state + hidden_state
        return hidden_state


class BridgeTowerTransformer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.num_hidden_layers = config.num_hidden_layers
        if config.remove_last_layer:
            self.resblocks = nn.ModuleList(
                [BridgeTowerResidualAttention(config) for _ in range(self.num_hidden_layers - 1)]
            )
        else:
            self.resblocks = nn.ModuleList(
                [BridgeTowerResidualAttention(config) for _ in range(self.num_hidden_layers)]
            )
        self.stop_gradient = config.stop_gradient

    def forward(self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):
        hidden_states = []
        for block in self.resblocks:
            hidden_state = block(hidden_state, attention_mask)
            if self.stop_gradient:
                hidden_states.append(hidden_state.detach())
            else:
                hidden_states.append(hidden_state)
        return hidden_states


# Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->BridgeTower
class BridgeTowerVisionEmbeddings(nn.Module):
    def __init__(self, config: BridgeTowerVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            bias=False,
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        batch_size = pixel_values.shape[0]
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))  # shape = [*, width, grid, grid]
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        embeddings = embeddings + self.position_embedding(self.position_ids)
        return embeddings


class BridgeTowerVisionTransformer(nn.Module):
    def __init__(self, config):
        super().__init__()

        self.embeddings = BridgeTowerVisionEmbeddings(config)
        self.ln_pre = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.transformer = BridgeTowerTransformer(config)
        self.ln_post = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.share_layernorm = config.share_layernorm
        if not config.share_layernorm:
            self.ln_separate = nn.ModuleList(
                [nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) for _ in range(config.num_hidden_layers)]
            )

    def forward(self, pixel_values: torch.Tensor, attention_mask):
        hidden_states = self.embeddings(pixel_values)
        hidden_states = self.ln_pre(hidden_states)
        # NLD -> LND
        hidden_states = hidden_states.permute(1, 0, 2)

        hidden_states = self.transformer(hidden_states, attention_mask)
        # shape = [num_hidden_layers, hidden_size, *, grid ** 2]
        hidden_states = torch.stack(hidden_states, dim=0)
        # shape = [num_hidden_layers, *, hidden_size, grid ** 2]
        hidden_states = hidden_states.permute(0, 2, 1, 3)
        if self.share_layernorm:
            hidden_states = self.ln_post(hidden_states)
        else:
            hidden_states_stack = []
            for hidden_states, ln in zip(hidden_states, self.ln_separate):
                hidden_states = ln(hidden_states)
                hidden_states_stack.append(hidden_states)
            # shape = [num_hidden_layers, *, hidden_size, grid ** 2]
            hidden_states = torch.stack(hidden_states_stack, dim=0)
        return hidden_states

    def forward_pre(self, pixel_values: torch.Tensor):
        hidden_states = self.embeddings(pixel_values)
        hidden_states = self.ln_pre(hidden_states)
        # NLD -> LND
        hidden_states = hidden_states.permute(1, 0, 2)
        return hidden_states

    def forward_post(self, hidden_state: torch.Tensor):
        visual_output_post = hidden_state.permute(1, 0, 2)
        visual_output_post = self.ln_post(visual_output_post)
        return visual_output_post

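
# Editorial sketch (not part of the original module): how `BridgeTowerVisionEmbeddings` sizes its
# position table. The 288x288 image and 16x16 patch values are illustrative, not checkpoint defaults.
def _example_num_positions(image_size: int = 288, patch_size: int = 16) -> int:
    """Patches per side squared, plus one slot for the class embedding."""
    num_patches = (image_size // patch_size) ** 2  # 18 * 18 = 324 patches
    return num_patches + 1  # 325 positions once the class token is included
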
class BridgeTowerLinkTower(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.link_tower_type = config.link_tower_type
        self.hidden_size = config.hidden_size
        if config.link_tower_type in ["add", "scaled_add", "interpolate"]:
            if config.link_tower_type == "scaled_add":
                self.scaled_factor = nn.Parameter(torch.tensor(1.0))
            elif config.link_tower_type == "interpolate":
                self.beta = nn.Parameter(torch.tensor(0.5))
            self.LayerNorm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps)
        else:
            raise NotImplementedError(f"link_tower_type {config.link_tower_type} is not implemented")

    def forward(self, hidden_states, cross_modal_hidden_states, attention_mask):
        if self.link_tower_type == "add":
            return self.LayerNorm(hidden_states + cross_modal_hidden_states)
        elif self.link_tower_type == "scaled_add":
            return self.LayerNorm(hidden_states * self.scaled_factor + cross_modal_hidden_states)
        elif self.link_tower_type == "interpolate":
            return self.LayerNorm(hidden_states * (1 - self.beta) + cross_modal_hidden_states * self.beta)
        else:
            raise NotImplementedError(f"link_tower_type {self.link_tower_type} is not implemented")

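
# Editorial sketch (not part of the original module): the "interpolate" link tower above blends the
# uni-modal and cross-modal states as (1 - beta) * hidden + beta * cross before a LayerNorm. The
# tensor shapes and beta value below are illustrative.
def _example_link_tower_interpolate(beta: float = 0.5) -> torch.Tensor:
    """Minimal sketch of the "interpolate" combination used by BridgeTowerLinkTower."""
    hidden_states = torch.ones(1, 3, 4)  # stand-in uni-modal states
    cross_modal_hidden_states = torch.zeros(1, 3, 4)  # stand-in cross-modal states
    combined = hidden_states * (1 - beta) + cross_modal_hidden_states * beta
    return nn.LayerNorm(4)(combined)
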
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BridgeTower
|
369 |
+
class BridgeTowerSelfOutput(nn.Module):
|
370 |
+
def __init__(self, config):
|
371 |
+
super().__init__()
|
372 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
373 |
+
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
374 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
375 |
+
|
376 |
+
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
|
377 |
+
hidden_states = self.dense(hidden_states)
|
378 |
+
hidden_states = self.dropout(hidden_states)
|
379 |
+
hidden_states = self.LayerNorm(hidden_states + input_tensor)
|
380 |
+
return hidden_states
|
381 |
+
|
382 |
+
|
383 |
+
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BridgeTower
|
384 |
+
class BridgeTowerIntermediate(nn.Module):
|
385 |
+
def __init__(self, config):
|
386 |
+
super().__init__()
|
387 |
+
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
|
388 |
+
if isinstance(config.hidden_act, str):
|
389 |
+
self.intermediate_act_fn = ACT2FN[config.hidden_act]
|
390 |
+
else:
|
391 |
+
self.intermediate_act_fn = config.hidden_act
|
392 |
+
|
393 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
394 |
+
hidden_states = self.dense(hidden_states)
|
395 |
+
hidden_states = self.intermediate_act_fn(hidden_states)
|
396 |
+
return hidden_states
|
397 |
+
|
398 |
+
|
399 |
+
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BridgeTower
|
400 |
+
class BridgeTowerOutput(nn.Module):
|
401 |
+
def __init__(self, config):
|
402 |
+
super().__init__()
|
403 |
+
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
|
404 |
+
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
405 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
406 |
+
|
407 |
+
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
|
408 |
+
hidden_states = self.dense(hidden_states)
|
409 |
+
hidden_states = self.dropout(hidden_states)
|
410 |
+
hidden_states = self.LayerNorm(hidden_states + input_tensor)
|
411 |
+
return hidden_states
|
412 |
+
|
413 |
+
|
414 |
+
# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->BridgeTower
|
415 |
+
class BridgeTowerPooler(nn.Module):
|
416 |
+
def __init__(self, config):
|
417 |
+
super().__init__()
|
418 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
419 |
+
self.activation = nn.Tanh()
|
420 |
+
|
421 |
+
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
422 |
+
# We "pool" the model by simply taking the hidden state corresponding
|
423 |
+
# to the first token.
|
424 |
+
first_token_tensor = hidden_states[:, 0]
|
425 |
+
pooled_output = self.dense(first_token_tensor)
|
426 |
+
pooled_output = self.activation(pooled_output)
|
427 |
+
return pooled_output
|
428 |
+
|
429 |
+
|
430 |
+
# Copied from transformers.models.roberta.modeling_roberta.RobertaSelfAttention with Roberta->BridgeTower
|
431 |
+
class BridgeTowerSelfAttention(nn.Module):
|
432 |
+
def __init__(self, config, position_embedding_type=None):
|
433 |
+
super().__init__()
|
434 |
+
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
|
435 |
+
raise ValueError(
|
436 |
+
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
|
437 |
+
f"heads ({config.num_attention_heads})"
|
438 |
+
)
|
439 |
+
|
440 |
+
self.num_attention_heads = config.num_attention_heads
|
441 |
+
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
|
442 |
+
self.all_head_size = self.num_attention_heads * self.attention_head_size
|
443 |
+
|
444 |
+
self.query = nn.Linear(config.hidden_size, self.all_head_size)
|
445 |
+
self.key = nn.Linear(config.hidden_size, self.all_head_size)
|
446 |
+
self.value = nn.Linear(config.hidden_size, self.all_head_size)
|
447 |
+
|
448 |
+
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
|
449 |
+
self.position_embedding_type = position_embedding_type or getattr(
|
450 |
+
config, "position_embedding_type", "absolute"
|
451 |
+
)
|
452 |
+
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
|
453 |
+
self.max_position_embeddings = config.max_position_embeddings
|
454 |
+
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
|
455 |
+
|
456 |
+
self.is_decoder = config.is_decoder
|
457 |
+
|
458 |
+
def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
|
459 |
+
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
|
460 |
+
x = x.view(new_x_shape)
|
461 |
+
return x.permute(0, 2, 1, 3)
|
462 |
+
|
463 |
+
def forward(
|
464 |
+
self,
|
465 |
+
hidden_states: torch.Tensor,
|
466 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
467 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
468 |
+
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
469 |
+
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
470 |
+
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
471 |
+
output_attentions: Optional[bool] = False,
|
472 |
+
) -> Tuple[torch.Tensor]:
|
473 |
+
mixed_query_layer = self.query(hidden_states)
|
474 |
+
|
475 |
+
# If this is instantiated as a cross-attention module, the keys
|
476 |
+
# and values come from an encoder; the attention mask needs to be
|
477 |
+
# such that the encoder's padding tokens are not attended to.
|
478 |
+
is_cross_attention = encoder_hidden_states is not None
|
479 |
+
|
480 |
+
if is_cross_attention and past_key_value is not None:
|
481 |
+
# reuse k,v, cross_attentions
|
482 |
+
key_layer = past_key_value[0]
|
483 |
+
value_layer = past_key_value[1]
|
484 |
+
attention_mask = encoder_attention_mask
|
485 |
+
elif is_cross_attention:
|
486 |
+
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
|
487 |
+
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
|
488 |
+
attention_mask = encoder_attention_mask
|
489 |
+
elif past_key_value is not None:
|
490 |
+
key_layer = self.transpose_for_scores(self.key(hidden_states))
|
491 |
+
value_layer = self.transpose_for_scores(self.value(hidden_states))
|
492 |
+
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
|
493 |
+
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
|
494 |
+
else:
|
495 |
+
key_layer = self.transpose_for_scores(self.key(hidden_states))
|
496 |
+
value_layer = self.transpose_for_scores(self.value(hidden_states))
|
497 |
+
|
498 |
+
query_layer = self.transpose_for_scores(mixed_query_layer)
|
499 |
+
|
500 |
+
use_cache = past_key_value is not None
|
501 |
+
if self.is_decoder:
|
502 |
+
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
|
503 |
+
# Further calls to cross_attention layer can then reuse all cross-attention
|
504 |
+
# key/value_states (first "if" case)
|
505 |
+
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
|
506 |
+
# all previous decoder key/value_states. Further calls to uni-directional self-attention
|
507 |
+
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
|
508 |
+
# if encoder bi-directional self-attention `past_key_value` is always `None`
|
509 |
+
past_key_value = (key_layer, value_layer)
|
510 |
+
|
511 |
+
# Take the dot product between "query" and "key" to get the raw attention scores.
|
512 |
+
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
|
513 |
+
|
514 |
+
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
|
515 |
+
query_length, key_length = query_layer.shape[2], key_layer.shape[2]
|
516 |
+
if use_cache:
|
517 |
+
position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
|
518 |
+
-1, 1
|
519 |
+
)
|
520 |
+
else:
|
521 |
+
position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
|
522 |
+
position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
|
523 |
+
distance = position_ids_l - position_ids_r
|
524 |
+
|
525 |
+
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
|
526 |
+
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
|
527 |
+
|
528 |
+
if self.position_embedding_type == "relative_key":
|
529 |
+
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
|
530 |
+
attention_scores = attention_scores + relative_position_scores
|
531 |
+
elif self.position_embedding_type == "relative_key_query":
|
532 |
+
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
|
533 |
+
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
|
534 |
+
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
|
535 |
+
|
536 |
+
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
|
537 |
+
if attention_mask is not None:
|
538 |
+
# Apply the attention mask is (precomputed for all layers in BridgeTowerModel forward() function)
|
539 |
+
attention_scores = attention_scores + attention_mask
|
540 |
+
|
541 |
+
# Normalize the attention scores to probabilities.
|
542 |
+
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
|
543 |
+
|
544 |
+
# This is actually dropping out entire tokens to attend to, which might
|
545 |
+
# seem a bit unusual, but is taken from the original Transformer paper.
|
546 |
+
attention_probs = self.dropout(attention_probs)
|
547 |
+
|
548 |
+
# Mask heads if we want to
|
549 |
+
if head_mask is not None:
|
550 |
+
attention_probs = attention_probs * head_mask
|
551 |
+
|
552 |
+
context_layer = torch.matmul(attention_probs, value_layer)
|
553 |
+
|
554 |
+
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
|
555 |
+
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
|
556 |
+
context_layer = context_layer.view(new_context_layer_shape)
|
557 |
+
|
558 |
+
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
|
559 |
+
|
560 |
+
if self.is_decoder:
|
561 |
+
outputs = outputs + (past_key_value,)
|
562 |
+
return outputs
|
563 |
+
|
564 |
+
|
565 |
+
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->BridgeTower
|
566 |
+
class BridgeTowerAttention(nn.Module):
|
567 |
+
def __init__(self, config, position_embedding_type=None):
|
568 |
+
super().__init__()
|
569 |
+
self.self = BridgeTowerSelfAttention(config, position_embedding_type=position_embedding_type)
|
570 |
+
self.output = BridgeTowerSelfOutput(config)
|
571 |
+
self.pruned_heads = set()
|
572 |
+
|
573 |
+
def prune_heads(self, heads):
|
574 |
+
if len(heads) == 0:
|
575 |
+
return
|
576 |
+
heads, index = find_pruneable_heads_and_indices(
|
577 |
+
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
|
578 |
+
)
|
579 |
+
|
580 |
+
# Prune linear layers
|
581 |
+
self.self.query = prune_linear_layer(self.self.query, index)
|
582 |
+
self.self.key = prune_linear_layer(self.self.key, index)
|
583 |
+
self.self.value = prune_linear_layer(self.self.value, index)
|
584 |
+
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
|
585 |
+
|
586 |
+
# Update hyper params and store pruned heads
|
587 |
+
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
|
588 |
+
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
|
589 |
+
self.pruned_heads = self.pruned_heads.union(heads)
|
590 |
+
|
591 |
+
def forward(
|
592 |
+
self,
|
593 |
+
hidden_states: torch.Tensor,
|
594 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
595 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
596 |
+
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
597 |
+
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
598 |
+
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
599 |
+
output_attentions: Optional[bool] = False,
|
600 |
+
) -> Tuple[torch.Tensor]:
|
601 |
+
self_outputs = self.self(
|
602 |
+
hidden_states,
|
603 |
+
attention_mask,
|
604 |
+
head_mask,
|
605 |
+
encoder_hidden_states,
|
606 |
+
encoder_attention_mask,
|
607 |
+
past_key_value,
|
608 |
+
output_attentions,
|
609 |
+
)
|
610 |
+
attention_output = self.output(self_outputs[0], hidden_states)
|
611 |
+
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
|
612 |
+
return outputs
|
613 |
+
|
614 |
+
|
615 |
+
class BridgeTowerBertCrossLayer(nn.Module):
|
616 |
+
def __init__(self, config):
|
617 |
+
super().__init__()
|
618 |
+
self.chunk_size_feed_forward = config.chunk_size_feed_forward
|
619 |
+
self.seq_len_dim = 1
|
620 |
+
self.attention = BridgeTowerAttention(config)
|
621 |
+
self.is_decoder = config.is_decoder
|
622 |
+
self.add_cross_attention = config.add_cross_attention
|
623 |
+
self.crossattention = BridgeTowerAttention(config)
|
624 |
+
self.intermediate = BridgeTowerIntermediate(config)
|
625 |
+
self.output = BridgeTowerOutput(config)
|
626 |
+
|
627 |
+
def forward(
|
628 |
+
self,
|
629 |
+
hidden_states,
|
630 |
+
encoder_hidden_states,
|
631 |
+
attention_mask=None,
|
632 |
+
head_mask=None,
|
633 |
+
encoder_attention_mask=None,
|
634 |
+
past_key_value=None,
|
635 |
+
output_attentions=False,
|
636 |
+
):
|
637 |
+
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
|
638 |
+
self_attention_outputs = self.attention(
|
639 |
+
hidden_states,
|
640 |
+
attention_mask=attention_mask,
|
641 |
+
head_mask=None,
|
642 |
+
output_attentions=output_attentions,
|
643 |
+
past_key_value=None,
|
644 |
+
)
|
645 |
+
attention_output = self_attention_outputs[0]
|
646 |
+
|
647 |
+
# if decoder, the last output is tuple of self-attn cache
|
648 |
+
# add self attentions if we output attention weights
|
649 |
+
outputs = self_attention_outputs[1:]
|
650 |
+
|
651 |
+
cross_attention_outputs = self.crossattention(
|
652 |
+
attention_output,
|
653 |
+
attention_mask=attention_mask,
|
654 |
+
head_mask=head_mask,
|
655 |
+
encoder_hidden_states=encoder_hidden_states,
|
656 |
+
encoder_attention_mask=encoder_attention_mask,
|
657 |
+
past_key_value=past_key_value,
|
658 |
+
output_attentions=output_attentions,
|
659 |
+
)
|
660 |
+
attention_output = cross_attention_outputs[0]
|
661 |
+
# add cross attentions if we output attention weights
|
662 |
+
outputs = outputs + cross_attention_outputs[1:-1]
|
663 |
+
|
664 |
+
layer_output = apply_chunking_to_forward(
|
665 |
+
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
|
666 |
+
)
|
667 |
+
outputs = (layer_output,) + outputs
|
668 |
+
|
669 |
+
return outputs
|
670 |
+
|
671 |
+
def feed_forward_chunk(self, attention_output):
|
672 |
+
intermediate_output = self.intermediate(attention_output)
|
673 |
+
layer_output = self.output(intermediate_output, attention_output)
|
674 |
+
return layer_output
|
675 |
+
|
676 |
+
|
677 |
+
class BridgeTowerTextLayer(nn.Module):
|
678 |
+
def __init__(self, config):
|
679 |
+
super().__init__()
|
680 |
+
self.chunk_size_feed_forward = config.chunk_size_feed_forward
|
681 |
+
self.seq_len_dim = 1
|
682 |
+
self.attention = BridgeTowerAttention(config)
|
683 |
+
self.is_decoder = config.is_decoder
|
684 |
+
self.add_cross_attention = config.add_cross_attention
|
685 |
+
if self.add_cross_attention:
|
686 |
+
if not self.is_decoder:
|
687 |
+
raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
|
688 |
+
self.crossattention = BridgeTowerAttention(config, position_embedding_type="absolute")
|
689 |
+
self.intermediate = BridgeTowerIntermediate(config)
|
690 |
+
self.output = BridgeTowerOutput(config)
|
691 |
+
|
692 |
+
def forward(
|
693 |
+
self,
|
694 |
+
hidden_states: torch.Tensor,
|
695 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
696 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
697 |
+
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
698 |
+
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
699 |
+
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
700 |
+
output_attentions: Optional[bool] = False,
|
701 |
+
) -> Tuple[torch.Tensor]:
|
702 |
+
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
|
703 |
+
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
|
704 |
+
self_attention_outputs = self.attention(
|
705 |
+
hidden_states,
|
706 |
+
attention_mask,
|
707 |
+
head_mask,
|
708 |
+
output_attentions=output_attentions,
|
709 |
+
past_key_value=self_attn_past_key_value,
|
710 |
+
)
|
711 |
+
attention_output = self_attention_outputs[0]
|
712 |
+
|
713 |
+
# if decoder, the last output is tuple of self-attn cache
|
714 |
+
if self.is_decoder:
|
715 |
+
outputs = self_attention_outputs[1:-1]
|
716 |
+
present_key_value = self_attention_outputs[-1]
|
717 |
+
else:
|
718 |
+
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
|
719 |
+
|
720 |
+
cross_attn_present_key_value = None
|
721 |
+
if self.is_decoder and encoder_hidden_states is not None:
|
722 |
+
if not hasattr(self, "crossattention"):
|
723 |
+
raise ValueError(
|
724 |
+
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
|
725 |
+
" by setting `config.add_cross_attention=True`"
|
726 |
+
)
|
727 |
+
|
728 |
+
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
|
729 |
+
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
|
730 |
+
cross_attention_outputs = self.crossattention(
|
731 |
+
attention_output,
|
732 |
+
attention_mask,
|
733 |
+
head_mask,
|
734 |
+
encoder_hidden_states,
|
735 |
+
encoder_attention_mask,
|
736 |
+
cross_attn_past_key_value,
|
737 |
+
output_attentions,
|
738 |
+
)
|
739 |
+
attention_output = cross_attention_outputs[0]
|
740 |
+
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
|
741 |
+
|
742 |
+
# add cross-attn cache to positions 3,4 of present_key_value tuple
|
743 |
+
cross_attn_present_key_value = cross_attention_outputs[-1]
|
744 |
+
present_key_value = present_key_value + cross_attn_present_key_value
|
745 |
+
|
746 |
+
layer_output = apply_chunking_to_forward(
|
747 |
+
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
|
748 |
+
)
|
749 |
+
outputs = (layer_output,) + outputs
|
750 |
+
|
751 |
+
# if decoder, return the attn key/values as the last output
|
752 |
+
if self.is_decoder:
|
753 |
+
outputs = outputs + (present_key_value,)
|
754 |
+
|
755 |
+
return outputs
|
756 |
+
|
757 |
+
def feed_forward_chunk(self, attention_output):
|
758 |
+
intermediate_output = self.intermediate(attention_output)
|
759 |
+
layer_output = self.output(intermediate_output, attention_output)
|
760 |
+
return layer_output
|
761 |
+
|
762 |
+
|
763 |
+
# Copied from transformers.models.roberta.modeling_roberta.RobertaEncoder with Roberta->BridgeTowerText
|
764 |
+
class BridgeTowerTextEncoder(nn.Module):
|
765 |
+
def __init__(self, config):
|
766 |
+
super().__init__()
|
767 |
+
self.config = config
|
768 |
+
self.layer = nn.ModuleList([BridgeTowerTextLayer(config) for _ in range(config.num_hidden_layers)])
|
769 |
+
self.gradient_checkpointing = False
|
770 |
+
|
771 |
+
def forward(
|
772 |
+
self,
|
773 |
+
hidden_states: torch.Tensor,
|
774 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
775 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
776 |
+
encoder_hidden_states: Optional[torch.FloatTensor] = None,
|
777 |
+
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
778 |
+
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
779 |
+
use_cache: Optional[bool] = None,
|
780 |
+
output_attentions: Optional[bool] = False,
|
781 |
+
output_hidden_states: Optional[bool] = False,
|
782 |
+
return_dict: Optional[bool] = True,
|
783 |
+
) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
|
784 |
+
all_hidden_states = () if output_hidden_states else None
|
785 |
+
all_self_attentions = () if output_attentions else None
|
786 |
+
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
|
787 |
+
|
788 |
+
if self.gradient_checkpointing and self.training:
|
789 |
+
if use_cache:
|
790 |
+
logger.warning_once(
|
791 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
792 |
+
)
|
793 |
+
use_cache = False
|
794 |
+
|
795 |
+
next_decoder_cache = () if use_cache else None
|
796 |
+
for i, layer_module in enumerate(self.layer):
|
797 |
+
if output_hidden_states:
|
798 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
799 |
+
|
800 |
+
layer_head_mask = head_mask[i] if head_mask is not None else None
|
801 |
+
past_key_value = past_key_values[i] if past_key_values is not None else None
|
802 |
+
|
803 |
+
if self.gradient_checkpointing and self.training:
|
804 |
+
layer_outputs = self._gradient_checkpointing_func(
|
805 |
+
layer_module.__call__,
|
806 |
+
hidden_states,
|
807 |
+
attention_mask,
|
808 |
+
layer_head_mask,
|
809 |
+
encoder_hidden_states,
|
810 |
+
encoder_attention_mask,
|
811 |
+
past_key_value,
|
812 |
+
output_attentions,
|
813 |
+
)
|
814 |
+
else:
|
815 |
+
layer_outputs = layer_module(
|
816 |
+
hidden_states,
|
817 |
+
attention_mask,
|
818 |
+
layer_head_mask,
|
819 |
+
encoder_hidden_states,
|
820 |
+
encoder_attention_mask,
|
821 |
+
past_key_value,
|
822 |
+
output_attentions,
|
823 |
+
)
|
824 |
+
|
825 |
+
hidden_states = layer_outputs[0]
|
826 |
+
if use_cache:
|
827 |
+
next_decoder_cache += (layer_outputs[-1],)
|
828 |
+
if output_attentions:
|
829 |
+
all_self_attentions = all_self_attentions + (layer_outputs[1],)
|
830 |
+
if self.config.add_cross_attention:
|
831 |
+
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
|
832 |
+
|
833 |
+
if output_hidden_states:
|
834 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
835 |
+
|
836 |
+
if not return_dict:
|
837 |
+
return tuple(
|
838 |
+
v
|
839 |
+
for v in [
|
840 |
+
hidden_states,
|
841 |
+
next_decoder_cache,
|
842 |
+
all_hidden_states,
|
843 |
+
all_self_attentions,
|
844 |
+
all_cross_attentions,
|
845 |
+
]
|
846 |
+
if v is not None
|
847 |
+
)
|
848 |
+
return BaseModelOutputWithPastAndCrossAttentions(
|
849 |
+
last_hidden_state=hidden_states,
|
850 |
+
past_key_values=next_decoder_cache,
|
851 |
+
hidden_states=all_hidden_states,
|
852 |
+
attentions=all_self_attentions,
|
853 |
+
cross_attentions=all_cross_attentions,
|
854 |
+
)
|
855 |
+
|
856 |
+
|
857 |
+
# Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->BridgeTowerText
|
858 |
+
class BridgeTowerTextEmbeddings(nn.Module):
|
859 |
+
"""
|
860 |
+
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
|
861 |
+
"""
|
862 |
+
|
863 |
+
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
|
864 |
+
def __init__(self, config):
|
865 |
+
super().__init__()
|
866 |
+
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
|
867 |
+
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
|
868 |
+
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
|
869 |
+
|
870 |
+
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
|
871 |
+
# any TensorFlow checkpoint file
|
872 |
+
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
873 |
+
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
874 |
+
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
|
875 |
+
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
|
876 |
+
self.register_buffer(
|
877 |
+
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
|
878 |
+
)
|
879 |
+
self.register_buffer(
|
880 |
+
"token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
|
881 |
+
)
|
882 |
+
|
883 |
+
# End copy
|
884 |
+
self.padding_idx = config.pad_token_id
|
885 |
+
self.position_embeddings = nn.Embedding(
|
886 |
+
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
|
887 |
+
)
|
888 |
+
|
889 |
+
def forward(
|
890 |
+
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
|
891 |
+
):
|
892 |
+
if position_ids is None:
|
893 |
+
if input_ids is not None:
|
894 |
+
# Create the position ids from the input token ids. Any padded tokens remain padded.
|
895 |
+
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
|
896 |
+
else:
|
897 |
+
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
|
898 |
+
|
899 |
+
if input_ids is not None:
|
900 |
+
input_shape = input_ids.size()
|
901 |
+
else:
|
902 |
+
input_shape = inputs_embeds.size()[:-1]
|
903 |
+
|
904 |
+
seq_length = input_shape[1]
|
905 |
+
|
906 |
+
# Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
|
907 |
+
# when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
|
908 |
+
# issue #5664
|
909 |
+
if token_type_ids is None:
|
910 |
+
if hasattr(self, "token_type_ids"):
|
911 |
+
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
|
912 |
+
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
|
913 |
+
token_type_ids = buffered_token_type_ids_expanded
|
914 |
+
else:
|
915 |
+
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
|
916 |
+
|
917 |
+
if inputs_embeds is None:
|
918 |
+
inputs_embeds = self.word_embeddings(input_ids)
|
919 |
+
token_type_embeddings = self.token_type_embeddings(token_type_ids)
|
920 |
+
|
921 |
+
embeddings = inputs_embeds + token_type_embeddings
|
922 |
+
if self.position_embedding_type == "absolute":
|
923 |
+
position_embeddings = self.position_embeddings(position_ids)
|
924 |
+
embeddings += position_embeddings
|
925 |
+
embeddings = self.LayerNorm(embeddings)
|
926 |
+
embeddings = self.dropout(embeddings)
|
927 |
+
return embeddings
|
928 |
+
|
929 |
+
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
|
930 |
+
"""
|
931 |
+
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
|
932 |
+
|
933 |
+
Args:
|
934 |
+
inputs_embeds: torch.Tensor
|
935 |
+
|
936 |
+
Returns: torch.Tensor
|
937 |
+
"""
|
938 |
+
input_shape = inputs_embeds.size()[:-1]
|
939 |
+
sequence_length = input_shape[1]
|
940 |
+
|
941 |
+
position_ids = torch.arange(
|
942 |
+
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
|
943 |
+
)
|
944 |
+
return position_ids.unsqueeze(0).expand(input_shape)
|
945 |
+
|
946 |
+
|
947 |
+
# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
|
948 |
+
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
|
949 |
+
"""
|
950 |
+
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
|
951 |
+
are ignored. This is modified from fairseq's `utils.make_positions`.
|
952 |
+
|
953 |
+
Args:
|
954 |
+
input_ids: torch.Tensor
|
955 |
+
|
956 |
+
Returns: torch.Tensor
|
957 |
+
"""
|
958 |
+
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
|
959 |
+
mask = input_ids.ne(padding_idx).int()
|
960 |
+
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
|
961 |
+
return incremental_indices.long() + padding_idx
|
962 |
+
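# Worked example (an editorial sketch, not part of the original file): with padding_idx = 1 and
# input_ids = [[0, 31414, 232, 2, 1, 1]], the mask is [1, 1, 1, 1, 0, 0], the cumulative sum gives
# [1, 2, 3, 4, 4, 4], masking and adding padding_idx yields position ids [2, 3, 4, 5, 1, 1] --
# non-padding tokens count up from padding_idx + 1 while padded positions stay at padding_idx,
# exactly as the docstring above describes.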
|
963 |
+
|
964 |
+
class BridgeTowerPreTrainedModel(PreTrainedModel):
|
965 |
+
"""
|
966 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
967 |
+
models.
|
968 |
+
"""
|
969 |
+
|
970 |
+
config_class = BridgeTowerConfig
|
971 |
+
base_model_prefix = "bridgetower"
|
972 |
+
supports_gradient_checkpointing = False
|
973 |
+
_no_split_modules = ["BridgeTowerSelfAttention", "BridgeTowerResidualAttention"]
|
974 |
+
_skip_keys_device_placement = "past_key_values"
|
975 |
+
|
976 |
+
def _init_weights(self, module):
|
977 |
+
if isinstance(module, BridgeTowerVisionModel):
|
978 |
+
proj_std = (module.visual.transformer.hidden_size**-0.5) * (
|
979 |
+
(2 * module.visual.transformer.num_hidden_layers) ** -0.5
|
980 |
+
)
|
981 |
+
attn_std = module.visual.transformer.hidden_size**-0.5
|
982 |
+
fc_std = (2 * module.visual.transformer.hidden_size) ** -0.5
|
983 |
+
for block in module.visual.transformer.resblocks:
|
984 |
+
nn.init.normal_(block.attn.in_proj_weight, std=attn_std * self.config.initializer_factor)
|
985 |
+
nn.init.normal_(block.attn.out_proj.weight, std=proj_std * self.config.initializer_factor)
|
986 |
+
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std * self.config.initializer_factor)
|
987 |
+
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std * self.config.initializer_factor)
|
988 |
+
|
989 |
+
nn.init.normal_(module.visual.embeddings.class_embedding, std=attn_std * self.config.initializer_factor)
|
990 |
+
nn.init.normal_(
|
991 |
+
module.visual.embeddings.position_embedding.weight, std=attn_std * self.config.initializer_factor
|
992 |
+
)
|
993 |
+
elif isinstance(module, (nn.Linear, nn.Conv2d, nn.Embedding)):
|
994 |
+
module.weight.data.normal_(mean=0.0, std=0.05 * self.config.initializer_factor)
|
995 |
+
elif isinstance(module, nn.LayerNorm):
|
996 |
+
module.bias.data.zero_()
|
997 |
+
module.weight.data.fill_(1.0)
|
998 |
+
|
999 |
+
if isinstance(module, nn.Linear) and module.bias is not None:
|
1000 |
+
module.bias.data.zero_()
|
1001 |
+
|
1002 |
+
|
1003 |
+
class BridgeTowerVisionModel(BridgeTowerPreTrainedModel):
|
1004 |
+
config_class = BridgeTowerVisionConfig
|
1005 |
+
|
1006 |
+
def __init__(self, config):
|
1007 |
+
super().__init__(config)
|
1008 |
+
self.visual = BridgeTowerVisionTransformer(config)
|
1009 |
+
|
1010 |
+
@property
|
1011 |
+
def dtype(self):
|
1012 |
+
return self.visual.embeddings.patch_embedding.weight.dtype
|
1013 |
+
|
1014 |
+
def forward(self, image, image_mask=None):
|
1015 |
+
return self.visual(image.type(self.dtype), image_mask)
|
1016 |
+
|
1017 |
+
|
1018 |
+
class BridgeTowerTextModel(BridgeTowerPreTrainedModel):
|
1019 |
+
"""
|
1020 |
+
|
1021 |
+
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
|
1022 |
+
cross-attention is added between the self-attention layers, following the architecture described in *Attention is
|
1023 |
+
all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
|
1024 |
+
Kaiser and Illia Polosukhin.
|
1025 |
+
|
1026 |
+
To behave as a decoder, the model needs to be initialized with the `is_decoder` argument of the configuration set
|
1027 |
+
to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
|
1028 |
+
`add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
|
1029 |
+
|
1030 |
+
.. _*Attention is all you need*: https://arxiv.org/abs/1706.03762
|
1031 |
+
|
1032 |
+
"""
|
1033 |
+
|
1034 |
+
config_class = BridgeTowerTextConfig
|
1035 |
+
|
1036 |
+
def __init__(self, config, add_pooling_layer=True):
|
1037 |
+
super().__init__(config)
|
1038 |
+
self.config = config
|
1039 |
+
|
1040 |
+
self.embeddings = BridgeTowerTextEmbeddings(config)
|
1041 |
+
self.encoder = BridgeTowerTextEncoder(config)
|
1042 |
+
|
1043 |
+
self.pooler = BridgeTowerPooler(config) if add_pooling_layer else None
|
1044 |
+
|
1045 |
+
# Initialize weights and apply final processing
|
1046 |
+
self.post_init()
|
1047 |
+
|
1048 |
+
def get_input_embeddings(self):
|
1049 |
+
return self.embeddings.word_embeddings
|
1050 |
+
|
1051 |
+
def set_input_embeddings(self, value):
|
1052 |
+
self.embeddings.word_embeddings = value
|
1053 |
+
|
1054 |
+
def _prune_heads(self, heads_to_prune):
|
1055 |
+
"""
|
1056 |
+
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
|
1057 |
+
class PreTrainedModel
|
1058 |
+
"""
|
1059 |
+
for layer, heads in heads_to_prune.items():
|
1060 |
+
self.encoder.layer[layer].attention.prune_heads(heads)
|
1061 |
+
|
1062 |
+
# Copied from transformers.models.roberta.modeling_roberta.RobertaModel.forward
|
1063 |
+
def forward(
|
1064 |
+
self,
|
1065 |
+
input_ids: Optional[torch.Tensor] = None,
|
1066 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1067 |
+
token_type_ids: Optional[torch.Tensor] = None,
|
1068 |
+
position_ids: Optional[torch.Tensor] = None,
|
1069 |
+
head_mask: Optional[torch.Tensor] = None,
|
1070 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
1071 |
+
encoder_hidden_states: Optional[torch.Tensor] = None,
|
1072 |
+
encoder_attention_mask: Optional[torch.Tensor] = None,
|
1073 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
1074 |
+
use_cache: Optional[bool] = None,
|
1075 |
+
output_attentions: Optional[bool] = None,
|
1076 |
+
output_hidden_states: Optional[bool] = None,
|
1077 |
+
return_dict: Optional[bool] = None,
|
1078 |
+
) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
|
1079 |
+
r"""
|
1080 |
+
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
1081 |
+
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
|
1082 |
+
the model is configured as a decoder.
|
1083 |
+
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
1084 |
+
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
|
1085 |
+
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
|
1086 |
+
|
1087 |
+
- 1 for tokens that are **not masked**,
|
1088 |
+
- 0 for tokens that are **masked**.
|
1089 |
+
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
|
1090 |
+
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
|
1091 |
+
|
1092 |
+
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
|
1093 |
+
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
|
1094 |
+
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
|
1095 |
+
use_cache (`bool`, *optional*):
|
1096 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
1097 |
+
`past_key_values`).
|
1098 |
+
"""
|
1099 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
1100 |
+
output_hidden_states = (
|
1101 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1102 |
+
)
|
1103 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1104 |
+
|
1105 |
+
if self.config.is_decoder:
|
1106 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
1107 |
+
else:
|
1108 |
+
use_cache = False
|
1109 |
+
|
1110 |
+
if input_ids is not None and inputs_embeds is not None:
|
1111 |
+
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
1112 |
+
elif input_ids is not None:
|
1113 |
+
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
|
1114 |
+
input_shape = input_ids.size()
|
1115 |
+
elif inputs_embeds is not None:
|
1116 |
+
input_shape = inputs_embeds.size()[:-1]
|
1117 |
+
else:
|
1118 |
+
raise ValueError("You have to specify either input_ids or inputs_embeds")
|
1119 |
+
|
1120 |
+
batch_size, seq_length = input_shape
|
1121 |
+
device = input_ids.device if input_ids is not None else inputs_embeds.device
|
1122 |
+
|
1123 |
+
# past_key_values_length
|
1124 |
+
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
|
1125 |
+
|
1126 |
+
if attention_mask is None:
|
1127 |
+
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
|
1128 |
+
|
1129 |
+
if token_type_ids is None:
|
1130 |
+
if hasattr(self.embeddings, "token_type_ids"):
|
1131 |
+
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
|
1132 |
+
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
|
1133 |
+
token_type_ids = buffered_token_type_ids_expanded
|
1134 |
+
else:
|
1135 |
+
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
|
1136 |
+
|
1137 |
+
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
|
1138 |
+
# ourselves in which case we just need to make it broadcastable to all heads.
|
1139 |
+
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
|
1140 |
+
|
1141 |
+
# If a 2D or 3D attention mask is provided for the cross-attention
|
1142 |
+
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
|
1143 |
+
if self.config.is_decoder and encoder_hidden_states is not None:
|
1144 |
+
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
|
1145 |
+
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
|
1146 |
+
if encoder_attention_mask is None:
|
1147 |
+
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
|
1148 |
+
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
|
1149 |
+
else:
|
1150 |
+
encoder_extended_attention_mask = None
|
1151 |
+
|
1152 |
+
# Prepare head mask if needed
|
1153 |
+
# 1.0 in head_mask indicate we keep the head
|
1154 |
+
# attention_probs has shape bsz x n_heads x N x N
|
1155 |
+
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
|
1156 |
+
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
|
1157 |
+
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
1158 |
+
|
1159 |
+
embedding_output = self.embeddings(
|
1160 |
+
input_ids=input_ids,
|
1161 |
+
position_ids=position_ids,
|
1162 |
+
token_type_ids=token_type_ids,
|
1163 |
+
inputs_embeds=inputs_embeds,
|
1164 |
+
past_key_values_length=past_key_values_length,
|
1165 |
+
)
|
1166 |
+
encoder_outputs = self.encoder(
|
1167 |
+
embedding_output,
|
1168 |
+
attention_mask=extended_attention_mask,
|
1169 |
+
head_mask=head_mask,
|
1170 |
+
encoder_hidden_states=encoder_hidden_states,
|
1171 |
+
encoder_attention_mask=encoder_extended_attention_mask,
|
1172 |
+
past_key_values=past_key_values,
|
1173 |
+
use_cache=use_cache,
|
1174 |
+
output_attentions=output_attentions,
|
1175 |
+
output_hidden_states=output_hidden_states,
|
1176 |
+
return_dict=return_dict,
|
1177 |
+
)
|
1178 |
+
sequence_output = encoder_outputs[0]
|
1179 |
+
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
|
1180 |
+
|
1181 |
+
if not return_dict:
|
1182 |
+
return (sequence_output, pooled_output) + encoder_outputs[1:]
|
1183 |
+
|
1184 |
+
return BaseModelOutputWithPoolingAndCrossAttentions(
|
1185 |
+
last_hidden_state=sequence_output,
|
1186 |
+
pooler_output=pooled_output,
|
1187 |
+
past_key_values=encoder_outputs.past_key_values,
|
1188 |
+
hidden_states=encoder_outputs.hidden_states,
|
1189 |
+
attentions=encoder_outputs.attentions,
|
1190 |
+
cross_attentions=encoder_outputs.cross_attentions,
|
1191 |
+
)
|
1192 |
+
|
1193 |
+
|
1194 |
+
@add_start_docstrings(
|
1195 |
+
"The bare BridgeTower Model transformer outputting BridgeTowerModelOutput object without any specific head on"
|
1196 |
+
" top.",
|
1197 |
+
BRIDGETOWER_START_DOCSTRING,
|
1198 |
+
)
|
1199 |
+
class BridgeTowerModel(BridgeTowerPreTrainedModel):
|
1200 |
+
def __init__(self, config):
|
1201 |
+
super().__init__(config)
|
1202 |
+
self.config = config
|
1203 |
+
vision_config = config.vision_config
|
1204 |
+
text_config = config.text_config
|
1205 |
+
|
1206 |
+
if config.share_cross_modal_transformer_layers:
|
1207 |
+
self.cross_modal_text_transform = nn.Linear(text_config.hidden_size, config.hidden_size)
|
1208 |
+
self.cross_modal_image_transform = nn.Linear(vision_config.hidden_size, config.hidden_size)
|
1209 |
+
else:
|
1210 |
+
self.cross_modal_text_transform = nn.ModuleList(
|
1211 |
+
[nn.Linear(text_config.hidden_size, config.hidden_size) for _ in range(config.num_hidden_layers)]
|
1212 |
+
)
|
1213 |
+
self.cross_modal_image_transform = nn.ModuleList(
|
1214 |
+
[nn.Linear(vision_config.hidden_size, config.hidden_size) for _ in range(config.num_hidden_layers)]
|
1215 |
+
)
|
1216 |
+
|
1217 |
+
self.token_type_embeddings = nn.Embedding(2, config.hidden_size)
|
1218 |
+
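# Two token types tag the modality of each position: index 0 is used for text tokens and, by
# default, index 1 for image tokens (see image_token_type_idx in the forward pass below).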
|
1219 |
+
self.vision_model = BridgeTowerVisionModel(vision_config)
|
1220 |
+
|
1221 |
+
self.text_model = BridgeTowerTextModel(text_config)
|
1222 |
+
|
1223 |
+
if not vision_config.share_layernorm and config.init_layernorm_from_vision_encoder:
|
1224 |
+
for ln in self.vision_model.visual.cross_modal_ln_separate:
|
1225 |
+
ln.weight.data = self.vision_model.visual.ln_post.weight.data
|
1226 |
+
ln.bias.data = self.vision_model.visual.ln_post.bias.data
|
1227 |
+
|
1228 |
+
self.cross_modal_image_layers = nn.ModuleList(
|
1229 |
+
[BridgeTowerBertCrossLayer(text_config) for _ in range(config.num_hidden_layers)]
|
1230 |
+
)
|
1231 |
+
self.cross_modal_text_layers = nn.ModuleList(
|
1232 |
+
[BridgeTowerBertCrossLayer(text_config) for _ in range(config.num_hidden_layers)]
|
1233 |
+
)
|
1234 |
+
|
1235 |
+
# Class token => Linear => Tanh
|
1236 |
+
self.cross_modal_image_pooler = BridgeTowerPooler(config)
|
1237 |
+
self.cross_modal_text_pooler = BridgeTowerPooler(config)
|
1238 |
+
|
1239 |
+
# Initialize BridgeTower Components
|
1240 |
+
self.cross_modal_text_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
1241 |
+
self.cross_modal_image_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
1242 |
+
|
1243 |
+
if config.share_link_tower_layers:
|
1244 |
+
self.cross_modal_text_link_tower = BridgeTowerLinkTower(config)
|
1245 |
+
self.cross_modal_image_link_tower = BridgeTowerLinkTower(config)
|
1246 |
+
else:
|
1247 |
+
self.cross_modal_text_link_tower = nn.ModuleList(
|
1248 |
+
[BridgeTowerLinkTower(config) for _ in range(config.num_hidden_layers - 1)]
|
1249 |
+
)
|
1250 |
+
self.cross_modal_image_link_tower = nn.ModuleList(
|
1251 |
+
[BridgeTowerLinkTower(config) for _ in range(config.num_hidden_layers - 1)]
|
1252 |
+
)
|
1253 |
+
|
1254 |
+
self.post_init()
|
1255 |
+
|
1256 |
+
def get_input_embeddings(self):
|
1257 |
+
return self.text_model.get_input_embeddings()
|
1258 |
+
|
1259 |
+
def set_input_embeddings(self, value):
|
1260 |
+
self.text_model.set_input_embeddings(value)
|
1261 |
+
|
1262 |
+
@add_start_docstrings_to_model_forward(BRIDGETOWER_INPUTS_DOCSTRING)
|
1263 |
+
@replace_return_docstrings(output_type=BridgeTowerModelOutput, config_class=_CONFIG_FOR_DOC)
|
1264 |
+
def forward(
|
1265 |
+
self,
|
1266 |
+
input_ids: Optional[torch.LongTensor] = None,
|
1267 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
1268 |
+
token_type_ids: Optional[torch.LongTensor] = None,
|
1269 |
+
pixel_values: Optional[torch.FloatTensor] = None,
|
1270 |
+
pixel_mask: Optional[torch.LongTensor] = None,
|
1271 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
1272 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1273 |
+
image_embeds: Optional[torch.FloatTensor] = None,
|
1274 |
+
image_token_type_idx: Optional[int] = None,
|
1275 |
+
output_attentions: Optional[bool] = None,
|
1276 |
+
output_hidden_states: Optional[bool] = None,
|
1277 |
+
return_dict: Optional[bool] = None,
|
1278 |
+
labels: Optional[torch.LongTensor] = None,
|
1279 |
+
) -> Union[Tuple[torch.Tensor], BridgeTowerModelOutput]:
|
1280 |
+
r"""
|
1281 |
+
output_hidden_states (`bool`, *optional*):
|
1282 |
+
If set to `True`, hidden states are returned as a list containing the hidden states of text, image, and
|
1283 |
+
cross-modal components respectively. i.e. `(hidden_states_text, hidden_states_image,
|
1284 |
+
hidden_states_cross_modal)` where each element is a list of the hidden states of the corresponding
|
1285 |
+
modality. `hidden_states_txt/img` are a list of tensors corresponding to unimodal hidden states and
|
1286 |
+
`hidden_states_cross_modal` is a list of tuples containing `cross_modal_text_hidden_states` and
|
1287 |
+
`cross_modal_image_hidden_states` of each bridge layer.
|
1288 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
1289 |
+
Labels are currently not supported.
|
1290 |
+
Returns:
|
1291 |
+
|
1292 |
+
Examples:
|
1293 |
+
|
1294 |
+
```python
|
1295 |
+
>>> from transformers import BridgeTowerProcessor, BridgeTowerModel
|
1296 |
+
>>> from PIL import Image
|
1297 |
+
>>> import requests
|
1298 |
+
|
1299 |
+
>>> # prepare image and text
|
1300 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
1301 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
1302 |
+
>>> text = "hello world"
|
1303 |
+
>>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
|
1304 |
+
>>> model = BridgeTowerModel.from_pretrained("BridgeTower/bridgetower-base")
|
1305 |
+
|
1306 |
+
>>> inputs = processor(image, text, return_tensors="pt")
|
1307 |
+
>>> outputs = model(**inputs)
|
1308 |
+
>>> outputs.keys()
|
1309 |
+
odict_keys(['text_features', 'image_features', 'pooler_output'])
|
1310 |
+
```"""
|
1311 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
1312 |
+
output_hidden_states = (
|
1313 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1314 |
+
)
|
1315 |
+
all_hidden_states_text = () if output_hidden_states else None
|
1316 |
+
all_hidden_states_image = () if output_hidden_states else None
|
1317 |
+
all_hidden_states_cross = () if output_hidden_states else None
|
1318 |
+
all_hidden_states = () if output_hidden_states else None
|
1319 |
+
all_self_attentions = () if output_attentions else None
|
1320 |
+
|
1321 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1322 |
+
image_token_type_idx = image_token_type_idx if image_token_type_idx else 1
|
1323 |
+
input_shape = input_ids.size()
|
1324 |
+
text_embeds = self.text_model.embeddings(input_ids=input_ids)
|
1325 |
+
|
1326 |
+
if output_hidden_states:
|
1327 |
+
all_hidden_states_text += (text_embeds,)
|
1328 |
+
|
1329 |
+
if attention_mask is None:
|
1330 |
+
attention_mask = torch.ones(input_shape, dtype=torch.long, device=input_ids.device)
|
1331 |
+
extend_text_masks = self.text_model.get_extended_attention_mask(attention_mask, input_shape).to(
|
1332 |
+
input_ids.device
|
1333 |
+
)
|
1334 |
+
|
1335 |
+
# The split_index determines how many layers of the uni-modal encoder are applied before the cross-modal encoder
|
1336 |
+
split_index = len(self.text_model.encoder.layer) - self.config.num_hidden_layers + 1
|
1337 |
+
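# A hedged illustration, assuming the default 12-layer text encoder and the default
# config.num_hidden_layers = 6: split_index = 12 - 6 + 1 = 7, so the first 7 unimodal layers
# run below, their output feeds cross-modal layer 0, and each of the remaining unimodal layers
# is routed through a link tower into the subsequent cross-modal layers.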
|
1338 |
+
# Run the first 'split_index' layers of the textual encoder
|
1339 |
+
for layer in self.text_model.encoder.layer[:split_index]:
|
1340 |
+
text_embeds = layer(text_embeds, extend_text_masks)[0]
|
1341 |
+
|
1342 |
+
if output_hidden_states:
|
1343 |
+
all_hidden_states_text += (text_embeds,)
|
1344 |
+
|
1345 |
+
if image_embeds is None:
|
1346 |
+
image_embeds = self.vision_model.visual.forward_pre(pixel_values.type(self.vision_model.dtype))
|
1347 |
+
else:
|
1348 |
+
# Permute as BridgeTowerResidualAttention has batch_first=True
|
1349 |
+
image_embeds = image_embeds.permute(1, 0, 2)
|
1350 |
+
|
1351 |
+
if output_hidden_states:
|
1352 |
+
all_hidden_states_image += (image_embeds,)
|
1353 |
+
|
1354 |
+
# Run the first 'split_index' layers of the visual encoder
|
1355 |
+
for block in self.vision_model.visual.transformer.resblocks[:split_index]:
|
1356 |
+
image_embeds = block(image_embeds)
|
1357 |
+
if output_hidden_states:
|
1358 |
+
all_hidden_states_image += (image_embeds,)
|
1359 |
+
|
1360 |
+
image_embeds_with_ln = self.vision_model.visual.forward_post(image_embeds.type(self.vision_model.dtype))
|
1361 |
+
|
1362 |
+
# first layer is a special case because we don't have the output from the cross-encoder yet
|
1363 |
+
cross_modal_text = self.cross_modal_text_transform(text_embeds)
|
1364 |
+
|
1365 |
+
text_token_type_embeddings = self.token_type_embeddings(
|
1366 |
+
torch.zeros(1, dtype=torch.long, device=input_ids.device)
|
1367 |
+
).expand_as(cross_modal_text)
|
1368 |
+
|
1369 |
+
cross_modal_text = self.cross_modal_text_layernorm(cross_modal_text + text_token_type_embeddings)
|
1370 |
+
|
1371 |
+
image_embeds_with_ln = self.cross_modal_image_transform(image_embeds_with_ln)
|
1372 |
+
image_token_type_embeddings = self.token_type_embeddings(
|
1373 |
+
torch.full((1,), image_token_type_idx, dtype=torch.long, device=input_ids.device)
|
1374 |
+
).expand_as(image_embeds_with_ln)
|
1375 |
+
|
1376 |
+
image_embeds_with_ln = image_embeds_with_ln + image_token_type_embeddings
|
1377 |
+
cross_modal_image = self.cross_modal_image_layernorm(image_embeds_with_ln)
|
1378 |
+
|
1379 |
+
pixel_mask = torch.ones(
|
1380 |
+
(cross_modal_image.size(0), cross_modal_image.size(1)),
|
1381 |
+
dtype=torch.long,
|
1382 |
+
device=input_ids.device,
|
1383 |
+
)
|
1384 |
+
extend_image_masks = self.text_model.get_extended_attention_mask(pixel_mask, pixel_mask.size()).to(
|
1385 |
+
input_ids.device
|
1386 |
+
)
|
1387 |
+
|
1388 |
+
layer_outputs_text = self.cross_modal_text_layers[0](
|
1389 |
+
cross_modal_text,
|
1390 |
+
cross_modal_image,
|
1391 |
+
attention_mask=extend_text_masks,
|
1392 |
+
encoder_attention_mask=extend_image_masks,
|
1393 |
+
output_attentions=output_attentions,
|
1394 |
+
)
|
1395 |
+
cross_text_features = layer_outputs_text[0]
|
1396 |
+
|
1397 |
+
layer_outputs_image = self.cross_modal_image_layers[0](
|
1398 |
+
cross_modal_image,
|
1399 |
+
cross_modal_text,
|
1400 |
+
attention_mask=extend_image_masks,
|
1401 |
+
encoder_attention_mask=extend_text_masks,
|
1402 |
+
output_attentions=output_attentions,
|
1403 |
+
)
|
1404 |
+
cross_image_features = layer_outputs_image[0]
|
1405 |
+
|
1406 |
+
if output_hidden_states:
|
1407 |
+
all_hidden_states_cross += ((cross_text_features, cross_image_features),)
|
1408 |
+
|
1409 |
+
if output_attentions:
|
1410 |
+
all_self_attentions += ((layer_outputs_text[1], layer_outputs_image[1]),)
|
1411 |
+
|
1412 |
+
link_layer_index = 0
|
1413 |
+
|
1414 |
+
# Each of the top 6 layers of the visual and textual encoders ([split_index:]) is connected to each layer of
|
1415 |
+
# the cross-modal encoder via bridge layers, which brings bottom-up alignment and fusion to the cross-modal encoder.
|
1416 |
+
for i in range(split_index, len(self.text_model.encoder.layer)):
|
1417 |
+
text_embeds = self.text_model.encoder.layer[i](text_embeds, extend_text_masks)[0]
|
1418 |
+
image_embeds = self.vision_model.visual.transformer.resblocks[i](image_embeds).type(
|
1419 |
+
self.vision_model.dtype
|
1420 |
+
)
|
1421 |
+
image_embeds_with_ln = (
|
1422 |
+
self.cross_modal_image_transform(self.vision_model.visual.forward_post(image_embeds))
|
1423 |
+
+ image_token_type_embeddings
|
1424 |
+
)
|
1425 |
+
|
1426 |
+
text_link_tower = self.cross_modal_text_link_tower[link_layer_index]
|
1427 |
+
image_link_tower = self.cross_modal_image_link_tower[link_layer_index]
|
1428 |
+
|
1429 |
+
# Bridge layers for textual and visual encoders
|
1430 |
+
cross_text_features_ = text_link_tower(
|
1431 |
+
self.cross_modal_text_transform(text_embeds) + text_token_type_embeddings,
|
1432 |
+
cross_text_features,
|
1433 |
+
extend_text_masks,
|
1434 |
+
)
|
1435 |
+
cross_image_features_ = image_link_tower(image_embeds_with_ln, cross_image_features, extend_image_masks)
|
1436 |
+
|
1437 |
+
# Cross-modal encoder via bridge layers of textual and visual encoders
|
1438 |
+
layer_outputs_text = self.cross_modal_text_layers[link_layer_index + 1](
|
1439 |
+
cross_text_features_,
|
1440 |
+
cross_image_features_,
|
1441 |
+
attention_mask=extend_text_masks,
|
1442 |
+
encoder_attention_mask=extend_image_masks,
|
1443 |
+
output_attentions=output_attentions,
|
1444 |
+
)
|
1445 |
+
cross_text_features = layer_outputs_text[0]
|
1446 |
+
|
1447 |
+
layer_outputs_image = self.cross_modal_image_layers[link_layer_index + 1](
|
1448 |
+
cross_image_features_,
|
1449 |
+
cross_text_features_,
|
1450 |
+
attention_mask=extend_image_masks,
|
1451 |
+
encoder_attention_mask=extend_text_masks,
|
1452 |
+
output_attentions=output_attentions,
|
1453 |
+
)
|
1454 |
+
cross_image_features = layer_outputs_image[0]
|
1455 |
+
|
1456 |
+
link_layer_index += 1
|
1457 |
+
|
1458 |
+
if output_hidden_states:
|
1459 |
+
all_hidden_states_text += (text_embeds,)
|
1460 |
+
all_hidden_states_image += (image_embeds,)
|
1461 |
+
all_hidden_states_cross += ((cross_text_features, cross_image_features),)
|
1462 |
+
|
1463 |
+
if output_attentions:
|
1464 |
+
all_self_attentions += ((layer_outputs_text[1], layer_outputs_image[1]),)
|
1465 |
+
|
1466 |
+
# Concatenate the cls tokens of the text and image features to get the final representation
|
1467 |
+
text_features, image_features = cross_text_features, cross_image_features
|
1468 |
+
cls_features = self.get_cls_features(text_features, image_features)
|
1469 |
+
|
1470 |
+
if output_hidden_states:
|
1471 |
+
all_hidden_states = (all_hidden_states_text, all_hidden_states_image, all_hidden_states_cross)
|
1472 |
+
|
1473 |
+
if not return_dict:
|
1474 |
+
return tuple(
|
1475 |
+
v
|
1476 |
+
for v in [text_features, image_features, cls_features, all_hidden_states, all_self_attentions]
|
1477 |
+
if v is not None
|
1478 |
+
)
|
1479 |
+
|
1480 |
+
return BridgeTowerModelOutput(
|
1481 |
+
text_features=text_features,
|
1482 |
+
image_features=image_features,
|
1483 |
+
pooler_output=cls_features,
|
1484 |
+
hidden_states=all_hidden_states,
|
1485 |
+
attentions=all_self_attentions,
|
1486 |
+
)
|
1487 |
+
|
1488 |
+
def get_cls_features(self, text_features, image_features):
|
1489 |
+
cls_features_text = self.cross_modal_text_pooler(text_features)
|
1490 |
+
cls_features_image = self.cross_modal_image_pooler(image_features)
|
1491 |
+
return torch.cat([cls_features_text, cls_features_image], dim=-1)
|
1492 |
+
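# Note: the concatenation doubles the feature dimension, so downstream heads that consume this
# pooled representation (the ITM head and the cross-modal contrastive head below) are built
# with an input size of config.hidden_size * 2.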
|
1493 |
+
|
1494 |
+
# Copied from transformers.models.vilt.modeling_vilt.ViltPredictionHeadTransform with Vilt->BridgeTower
|
1495 |
+
class BridgeTowerPredictionHeadTransform(nn.Module):
|
1496 |
+
def __init__(self, config):
|
1497 |
+
super().__init__()
|
1498 |
+
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
1499 |
+
if isinstance(config.hidden_act, str):
|
1500 |
+
self.transform_act_fn = ACT2FN[config.hidden_act]
|
1501 |
+
else:
|
1502 |
+
self.transform_act_fn = config.hidden_act
|
1503 |
+
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
1504 |
+
|
1505 |
+
def forward(self, hidden_states):
|
1506 |
+
hidden_states = self.dense(hidden_states)
|
1507 |
+
hidden_states = self.transform_act_fn(hidden_states)
|
1508 |
+
hidden_states = self.LayerNorm(hidden_states)
|
1509 |
+
return hidden_states
|
1510 |
+
|
1511 |
+
|
1512 |
+
class BridgeTowerMLMHead(nn.Module):
|
1513 |
+
def __init__(self, config, weight=None):
|
1514 |
+
super().__init__()
|
1515 |
+
self.config = config
|
1516 |
+
self.transform = BridgeTowerPredictionHeadTransform(config)
|
1517 |
+
self.decoder = nn.Linear(config.hidden_size, config.text_config.vocab_size, bias=False)
|
1518 |
+
self.bias = nn.Parameter(torch.zeros(config.text_config.vocab_size))
|
1519 |
+
if weight is not None:
|
1520 |
+
self.decoder.weight = weight
|
1521 |
+
|
1522 |
+
def forward(self, x):
|
1523 |
+
mlm_score = self.transform(x)
|
1524 |
+
mlm_score = self.decoder(mlm_score) + self.bias
|
1525 |
+
return mlm_score
|
1526 |
+
|
1527 |
+
|
1528 |
+
class BridgeTowerITMHead(nn.Module):
|
1529 |
+
def __init__(self, hidden_size):
|
1530 |
+
super().__init__()
|
1531 |
+
self.fc = nn.Linear(hidden_size, 2)
|
1532 |
+
|
1533 |
+
def forward(self, x):
|
1534 |
+
itm_score = self.fc(x)
|
1535 |
+
return itm_score
|
1536 |
+
|
1537 |
+
|
1538 |
+
@add_start_docstrings(
|
1539 |
+
"""
|
1540 |
+
BridgeTower Model with a language modeling head on top as done during pretraining.
|
1541 |
+
""",
|
1542 |
+
BRIDGETOWER_START_DOCSTRING,
|
1543 |
+
)
|
1544 |
+
class BridgeTowerForMaskedLM(BridgeTowerPreTrainedModel):
|
1545 |
+
_tied_weights_keys = ["mlm_score.decoder.weight"]
|
1546 |
+
|
1547 |
+
def __init__(self, config):
|
1548 |
+
super().__init__(config)
|
1549 |
+
|
1550 |
+
self.bridgetower = BridgeTowerModel(config)
|
1551 |
+
self.mlm_score = BridgeTowerMLMHead(config)
|
1552 |
+
|
1553 |
+
# Initialize weights and apply final processing
|
1554 |
+
self.post_init()
|
1555 |
+
|
1556 |
+
def get_output_embeddings(self):
|
1557 |
+
return self.mlm_score.decoder
|
1558 |
+
|
1559 |
+
def set_output_embeddings(self, new_embeddings):
|
1560 |
+
self.mlm_score.decoder = new_embeddings
|
1561 |
+
|
1562 |
+
@add_start_docstrings_to_model_forward(BRIDGETOWER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
|
1563 |
+
@replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
|
1564 |
+
def forward(
|
1565 |
+
self,
|
1566 |
+
input_ids: Optional[torch.LongTensor] = None,
|
1567 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
1568 |
+
token_type_ids: Optional[torch.LongTensor] = None,
|
1569 |
+
pixel_values: Optional[torch.FloatTensor] = None,
|
1570 |
+
pixel_mask: Optional[torch.LongTensor] = None,
|
1571 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
1572 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1573 |
+
image_embeds: Optional[torch.FloatTensor] = None,
|
1574 |
+
output_attentions: Optional[bool] = None,
|
1575 |
+
output_hidden_states: Optional[bool] = None,
|
1576 |
+
return_dict: Optional[bool] = None,
|
1577 |
+
labels: Optional[torch.LongTensor] = None,
|
1578 |
+
) -> Union[MaskedLMOutput, Tuple[torch.FloatTensor]]:
|
1579 |
+
r"""
|
1580 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
1581 |
+
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
|
1582 |
+
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
|
1583 |
+
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
|
1584 |
+
Returns:
|
1585 |
+
|
1586 |
+
Examples:
|
1587 |
+
|
1588 |
+
```python
|
1589 |
+
>>> from transformers import BridgeTowerProcessor, BridgeTowerForMaskedLM
|
1590 |
+
>>> from PIL import Image
|
1591 |
+
>>> import requests
|
1592 |
+
|
1593 |
+
>>> url = "http://images.cocodataset.org/val2017/000000360943.jpg"
|
1594 |
+
>>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
|
1595 |
+
>>> text = "a <mask> looking out of the window"
|
1596 |
+
|
1597 |
+
>>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")
|
1598 |
+
>>> model = BridgeTowerForMaskedLM.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")
|
1599 |
+
|
1600 |
+
>>> # prepare inputs
|
1601 |
+
>>> encoding = processor(image, text, return_tensors="pt")
|
1602 |
+
|
1603 |
+
>>> # forward pass
|
1604 |
+
>>> outputs = model(**encoding)
|
1605 |
+
|
1606 |
+
>>> results = processor.decode(outputs.logits.argmax(dim=-1).squeeze(0).tolist())
|
1607 |
+
|
1608 |
+
>>> print(results)
|
1609 |
+
.a cat looking out of the window.
|
1610 |
+
```"""
|
1611 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1612 |
+
outputs = self.bridgetower(
|
1613 |
+
input_ids,
|
1614 |
+
attention_mask=attention_mask,
|
1615 |
+
token_type_ids=token_type_ids,
|
1616 |
+
pixel_values=pixel_values,
|
1617 |
+
pixel_mask=pixel_mask,
|
1618 |
+
head_mask=head_mask,
|
1619 |
+
inputs_embeds=inputs_embeds,
|
1620 |
+
image_embeds=image_embeds,
|
1621 |
+
output_attentions=output_attentions,
|
1622 |
+
output_hidden_states=output_hidden_states,
|
1623 |
+
return_dict=return_dict,
|
1624 |
+
)
|
1625 |
+
|
1626 |
+
mlm_logits = self.mlm_score(outputs.text_features if return_dict else outputs[0])
|
1627 |
+
masked_lm_loss = None
|
1628 |
+
if labels is not None:
|
1629 |
+
loss_fct = CrossEntropyLoss() # -100 index = padding token
|
1630 |
+
|
1631 |
+
labels = labels.to(mlm_logits.device)
|
1632 |
+
masked_lm_loss = loss_fct(mlm_logits.view(-1, self.config.text_config.vocab_size), labels.view(-1))
|
1633 |
+
|
1634 |
+
if not return_dict:
|
1635 |
+
output = tuple(mlm_logits)
|
1636 |
+
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
|
1637 |
+
|
1638 |
+
return MaskedLMOutput(
|
1639 |
+
loss=masked_lm_loss,
|
1640 |
+
logits=mlm_logits,
|
1641 |
+
hidden_states=outputs.hidden_states,
|
1642 |
+
attentions=outputs.attentions,
|
1643 |
+
)
|
1644 |
+
|
1645 |
+
|
1646 |
+
@add_start_docstrings(
|
1647 |
+
"""
|
1648 |
+
BridgeTower Model transformer with a classifier head on top (a linear layer on top of the final hidden state of the
|
1649 |
+
[CLS] token) for image-to-text matching.
|
1650 |
+
""",
|
1651 |
+
BRIDGETOWER_START_DOCSTRING,
|
1652 |
+
)
|
1653 |
+
class BridgeTowerForImageAndTextRetrieval(BridgeTowerPreTrainedModel):
|
1654 |
+
def __init__(self, config):
|
1655 |
+
super().__init__(config)
|
1656 |
+
|
1657 |
+
self.bridgetower = BridgeTowerModel(config)
|
1658 |
+
|
1659 |
+
self.itm_score = BridgeTowerITMHead(config.hidden_size * 2)
|
1660 |
+
|
1661 |
+
# Initialize weights and apply final processing
|
1662 |
+
self.post_init()
|
1663 |
+
|
1664 |
+
@add_start_docstrings_to_model_forward(BRIDGETOWER_INPUTS_DOCSTRING)
|
1665 |
+
@replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
|
1666 |
+
def forward(
|
1667 |
+
self,
|
1668 |
+
input_ids: Optional[torch.LongTensor] = None,
|
1669 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
1670 |
+
token_type_ids: Optional[torch.LongTensor] = None,
|
1671 |
+
pixel_values: Optional[torch.FloatTensor] = None,
|
1672 |
+
pixel_mask: Optional[torch.LongTensor] = None,
|
1673 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
1674 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1675 |
+
image_embeds: Optional[torch.FloatTensor] = None,
|
1676 |
+
output_attentions: Optional[bool] = None,
|
1677 |
+
output_hidden_states: Optional[bool] = None,
|
1678 |
+
return_dict: Optional[bool] = None,
|
1679 |
+
labels: Optional[torch.LongTensor] = None,
|
1680 |
+
) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]:
|
1681 |
+
r"""
|
1682 |
+
labels (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*):
|
1683 |
+
Labels for computing the image-text matching loss. 0 means the pairs don't match and 1 means they match.
|
1684 |
+
The pairs with 0 will be skipped for calculation.
|
1685 |
+
Returns:
|
1686 |
+
|
1687 |
+
Examples:
|
1688 |
+
|
1689 |
+
```python
|
1690 |
+
>>> from transformers import BridgeTowerProcessor, BridgeTowerForImageAndTextRetrieval
|
1691 |
+
>>> import requests
|
1692 |
+
>>> from PIL import Image
|
1693 |
+
|
1694 |
+
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
1695 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
1696 |
+
>>> texts = ["An image of two cats chilling on a couch", "A football player scoring a goal"]
|
1697 |
+
|
1698 |
+
>>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")
|
1699 |
+
>>> model = BridgeTowerForImageAndTextRetrieval.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")
|
1700 |
+
|
1701 |
+
>>> # forward pass
|
1702 |
+
>>> scores = dict()
|
1703 |
+
>>> for text in texts:
|
1704 |
+
... # prepare inputs
|
1705 |
+
... encoding = processor(image, text, return_tensors="pt")
|
1706 |
+
... outputs = model(**encoding)
|
1707 |
+
... scores[text] = outputs.logits[0, 1].item()
|
1708 |
+
```"""
|
1709 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1710 |
+
|
1711 |
+
outputs = self.bridgetower(
|
1712 |
+
input_ids,
|
1713 |
+
attention_mask=attention_mask,
|
1714 |
+
token_type_ids=token_type_ids,
|
1715 |
+
pixel_values=pixel_values,
|
1716 |
+
pixel_mask=pixel_mask,
|
1717 |
+
head_mask=head_mask,
|
1718 |
+
inputs_embeds=inputs_embeds,
|
1719 |
+
image_embeds=image_embeds,
|
1720 |
+
output_attentions=output_attentions,
|
1721 |
+
output_hidden_states=output_hidden_states,
|
1722 |
+
return_dict=return_dict,
|
1723 |
+
)
|
1724 |
+
|
1725 |
+
pooler_output = outputs.pooler_output if return_dict else outputs[2]
|
1726 |
+
|
1727 |
+
logits = self.itm_score(pooler_output)
|
1728 |
+
|
1729 |
+
itm_loss = None
|
1730 |
+
if labels is not None:
|
1731 |
+
loss_fct = CrossEntropyLoss()
|
1732 |
+
|
1733 |
+
labels = labels.to(logits.device)
|
1734 |
+
itm_loss = loss_fct(logits, labels)
|
1735 |
+
|
1736 |
+
if not return_dict:
|
1737 |
+
output = tuple(logits)
|
1738 |
+
return ((itm_loss,) + output) if itm_loss is not None else output
|
1739 |
+
|
1740 |
+
return SequenceClassifierOutput(
|
1741 |
+
loss=itm_loss,
|
1742 |
+
logits=logits,
|
1743 |
+
hidden_states=outputs.hidden_states,
|
1744 |
+
attentions=outputs.attentions,
|
1745 |
+
)
|
1746 |
+
|
1747 |
+
|
1748 |
+
class BridgeTowerContrastiveHead(nn.Module):
|
1749 |
+
def __init__(self, hidden_size, embed_size):
|
1750 |
+
super().__init__()
|
1751 |
+
self.fc = nn.Linear(hidden_size, embed_size)
|
1752 |
+
|
1753 |
+
def forward(self, x):
|
1754 |
+
x = self.fc(x)
|
1755 |
+
return x
|
1756 |
+
|
1757 |
+
|
1758 |
+
@add_start_docstrings(
|
1759 |
+
"""
|
1760 |
+
BridgeTower Model with an image-text contrastive head on top, computing the image-text contrastive loss.
|
1761 |
+
""",
|
1762 |
+
BRIDGETOWER_START_DOCSTRING,
|
1763 |
+
)
|
1764 |
+
class BridgeTowerForContrastiveLearning(BridgeTowerPreTrainedModel):
|
1765 |
+
def __init__(self, config):
|
1766 |
+
super().__init__(config)
|
1767 |
+
|
1768 |
+
self.bridgetower = BridgeTowerModel(config)
|
1769 |
+
|
1770 |
+
self.itc_text_head = BridgeTowerContrastiveHead(config.hidden_size, config.contrastive_hidden_size)
|
1771 |
+
self.itc_image_head = BridgeTowerContrastiveHead(config.hidden_size, config.contrastive_hidden_size)
|
1772 |
+
self.itc_cross_modal_head = BridgeTowerContrastiveHead(config.hidden_size * 2, config.contrastive_hidden_size)
|
1773 |
+
|
1774 |
+
self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
|
1775 |
+
# Initialize weights and apply final processing
|
1776 |
+
self.post_init()
|
1777 |
+
|
1778 |
+
@add_start_docstrings_to_model_forward(BRIDGETOWER_INPUTS_DOCSTRING)
|
1779 |
+
@replace_return_docstrings(output_type=BridgeTowerContrastiveOutput, config_class=_CONFIG_FOR_DOC)
|
1780 |
+
def forward(
|
1781 |
+
self,
|
1782 |
+
input_ids: Optional[torch.LongTensor] = None,
|
1783 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
1784 |
+
token_type_ids: Optional[torch.LongTensor] = None,
|
1785 |
+
pixel_values: Optional[torch.FloatTensor] = None,
|
1786 |
+
pixel_mask: Optional[torch.LongTensor] = None,
|
1787 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
1788 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1789 |
+
image_embeds: Optional[torch.FloatTensor] = None,
|
1790 |
+
output_attentions: Optional[bool] = None,
|
1791 |
+
output_hidden_states: Optional[bool] = True,
|
1792 |
+
return_dict: Optional[bool] = None,
|
1793 |
+
return_loss: Optional[bool] = None,
|
1794 |
+
) -> Union[BridgeTowerContrastiveOutput, Tuple[torch.FloatTensor]]:
|
1795 |
+
r"""
|
1796 |
+
return_loss (`bool`, *optional*):
|
1797 |
+
Whether or not to return the contrastive loss.
|
1798 |
+
Returns:
|
1799 |
+
|
1800 |
+
Examples:
|
1801 |
+
|
1802 |
+
```python
|
1803 |
+
>>> from transformers import BridgeTowerProcessor, BridgeTowerForContrastiveLearning
|
1804 |
+
>>> import requests
|
1805 |
+
>>> from PIL import Image
|
1806 |
+
>>> import torch
|
1807 |
+
|
1808 |
+
>>> image_urls = [
|
1809 |
+
... "https://farm4.staticflickr.com/3395/3428278415_81c3e27f15_z.jpg",
|
1810 |
+
... "http://images.cocodataset.org/val2017/000000039769.jpg",
|
1811 |
+
... ]
|
1812 |
+
>>> texts = ["two dogs in a car", "two cats sleeping on a couch"]
|
1813 |
+
>>> images = [Image.open(requests.get(url, stream=True).raw) for url in image_urls]
|
1814 |
+
|
1815 |
+
>>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc")
|
1816 |
+
>>> model = BridgeTowerForContrastiveLearning.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc")
|
1817 |
+
|
1818 |
+
>>> inputs = processor(images, texts, padding=True, return_tensors="pt")
|
1819 |
+
>>> loss = model(**inputs, return_loss=True).loss
|
1820 |
+
|
1821 |
+
>>> inputs = processor(images, texts[::-1], padding=True, return_tensors="pt")
|
1822 |
+
>>> loss_swapped = model(**inputs, return_loss=True).loss
|
1823 |
+
|
1824 |
+
>>> print("Loss", round(loss.item(), 4))
|
1825 |
+
Loss 0.0019
|
1826 |
+
|
1827 |
+
>>> print("Loss with swapped images", round(loss_swapped.item(), 4))
|
1828 |
+
Loss with swapped images 2.126
|
1829 |
+
```"""
|
1830 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1831 |
+
|
1832 |
+
outputs = self.bridgetower(
|
1833 |
+
input_ids,
|
1834 |
+
attention_mask=attention_mask,
|
1835 |
+
token_type_ids=token_type_ids,
|
1836 |
+
pixel_values=pixel_values,
|
1837 |
+
pixel_mask=pixel_mask,
|
1838 |
+
head_mask=head_mask,
|
1839 |
+
inputs_embeds=inputs_embeds,
|
1840 |
+
image_embeds=image_embeds,
|
1841 |
+
output_attentions=output_attentions,
|
1842 |
+
output_hidden_states=True,
|
1843 |
+
return_dict=return_dict,
|
1844 |
+
)
|
1845 |
+
|
1846 |
+
pooler_output = outputs.pooler_output if return_dict else outputs[2]
|
1847 |
+
hidden_states_txt, hidden_states_img, hidden_states_cross_modal = (
|
1848 |
+
outputs.hidden_states if return_dict else outputs[3]
|
1849 |
+
)
|
1850 |
+
|
1851 |
+
text_embeds = hidden_states_txt[-1]
|
1852 |
+
image_embeds = hidden_states_img[-1]
|
1853 |
+
|
1854 |
+
image_embeds_with_ln = self.bridgetower.vision_model.visual.forward_post(image_embeds)
|
1855 |
+
image_token_type_embeddings = self.bridgetower.token_type_embeddings(
|
1856 |
+
torch.full((1,), 1, dtype=torch.long, device=self.bridgetower.token_type_embeddings.weight.device)
|
1857 |
+
).expand_as(image_embeds_with_ln)
|
1858 |
+
|
1859 |
+
image_embeds = self.bridgetower.cross_modal_image_transform(image_embeds_with_ln) + image_token_type_embeddings
|
1860 |
+
|
1861 |
+
# normalized features
|
1862 |
+
text_embeds = nn.functional.normalize(self.itc_text_head(text_embeds[:, 0, :]), dim=-1, p=2)
|
1863 |
+
image_embeds = nn.functional.normalize(self.itc_image_head(image_embeds[:, 0, :]), dim=-1, p=2).to(
|
1864 |
+
device=text_embeds.device
|
1865 |
+
)
|
1866 |
+
cross_embeds = nn.functional.normalize(self.itc_cross_modal_head(pooler_output), dim=-1, p=2).to(
|
1867 |
+
device=text_embeds.device
|
1868 |
+
)
|
1869 |
+
|
1870 |
+
logits = torch.stack([text_embeds, image_embeds, cross_embeds], dim=-2)
|
1871 |
+
|
1872 |
+
logit_scale = self.logit_scale.exp().to(device=text_embeds.device)
|
1873 |
+
logits_text_to_image = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
|
1874 |
+
logits_text_to_cross = torch.matmul(text_embeds, cross_embeds.t()) * logit_scale
|
1875 |
+
logits_image_to_cross = torch.matmul(image_embeds, cross_embeds.t()) * logit_scale
|
1876 |
+
|
1877 |
+
itc_loss = None
|
1878 |
+
|
1879 |
+
if return_loss:
|
1880 |
+
labels = torch.arange(len(logits), device=logits.device)
|
1881 |
+
text_to_image_loss = nn.functional.cross_entropy(logits_text_to_image, labels)
|
1882 |
+
text_to_cross_loss = nn.functional.cross_entropy(logits_text_to_cross, labels)
|
1883 |
+
image_to_cross_loss = nn.functional.cross_entropy(logits_image_to_cross, labels)
|
1884 |
+
itc_loss = (text_to_image_loss + text_to_cross_loss + image_to_cross_loss) / 3.0
|
1885 |
+
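# In other words, the contrastive target for sample i is index i itself (the matching pair lies
# on the diagonal of each batch-by-batch similarity matrix), and the final loss is the mean of
# the three pairwise cross-entropy terms computed above.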
|
1886 |
+
if not return_dict:
|
1887 |
+
output = (logits, text_embeds, image_embeds, cross_embeds) + outputs[3:]
|
1888 |
+
return ((itc_loss,) + output) if itc_loss is not None else output
|
1889 |
+
|
1890 |
+
return BridgeTowerContrastiveOutput(
|
1891 |
+
loss=itc_loss,
|
1892 |
+
logits=logits,
|
1893 |
+
text_embeds=text_embeds,
|
1894 |
+
image_embeds=image_embeds,
|
1895 |
+
cross_embeds=cross_embeds,
|
1896 |
+
hidden_states=outputs.hidden_states,
|
1897 |
+
attentions=outputs.attentions,
|
1898 |
+
)
|
llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/processing_bridgetower.py
ADDED
@@ -0,0 +1,119 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2023 The Intel Labs Team Authors, The Microsoft Research Team Authors and HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
"""
|
16 |
+
Processor class for BridgeTower.
|
17 |
+
"""
|
18 |
+
|
19 |
+
from typing import List, Optional, Union
|
20 |
+
|
21 |
+
from ...processing_utils import ProcessorMixin
|
22 |
+
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
|
23 |
+
from ...utils import TensorType
|
24 |
+
|
25 |
+
|
26 |
+
class BridgeTowerProcessor(ProcessorMixin):
|
27 |
+
r"""
|
28 |
+
Constructs a BridgeTower processor which wraps a Roberta tokenizer and BridgeTower image processor into a single
|
29 |
+
processor.
|
30 |
+
|
31 |
+
[`BridgeTowerProcessor`] offers all the functionalities of [`BridgeTowerImageProcessor`] and
|
32 |
+
[`RobertaTokenizerFast`]. See the docstring of [`~BridgeTowerProcessor.__call__`] and
|
33 |
+
[`~BridgeTowerProcessor.decode`] for more information.
|
34 |
+
|
35 |
+
Args:
|
36 |
+
image_processor (`BridgeTowerImageProcessor`):
|
37 |
+
An instance of [`BridgeTowerImageProcessor`]. The image processor is a required input.
|
38 |
+
tokenizer (`RobertaTokenizerFast`):
|
39 |
+
An instance of [`RobertaTokenizerFast`]. The tokenizer is a required input.
|
40 |
+
"""
|
41 |
+
|
42 |
+
attributes = ["image_processor", "tokenizer"]
|
43 |
+
image_processor_class = "BridgeTowerImageProcessor"
|
44 |
+
tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
|
45 |
+
|
46 |
+
def __init__(self, image_processor, tokenizer):
|
47 |
+
super().__init__(image_processor, tokenizer)
|
48 |
+
|
49 |
+
def __call__(
|
50 |
+
self,
|
51 |
+
images,
|
52 |
+
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
|
53 |
+
add_special_tokens: bool = True,
|
54 |
+
padding: Union[bool, str, PaddingStrategy] = False,
|
55 |
+
truncation: Union[bool, str, TruncationStrategy] = None,
|
56 |
+
max_length: Optional[int] = None,
|
57 |
+
stride: int = 0,
|
58 |
+
pad_to_multiple_of: Optional[int] = None,
|
59 |
+
return_token_type_ids: Optional[bool] = None,
|
60 |
+
return_attention_mask: Optional[bool] = None,
|
61 |
+
return_overflowing_tokens: bool = False,
|
62 |
+
return_special_tokens_mask: bool = False,
|
63 |
+
return_offsets_mapping: bool = False,
|
64 |
+
return_length: bool = False,
|
65 |
+
verbose: bool = True,
|
66 |
+
return_tensors: Optional[Union[str, TensorType]] = None,
|
67 |
+
**kwargs,
|
68 |
+
) -> BatchEncoding:
|
69 |
+
"""
|
70 |
+
This method uses [`BridgeTowerImageProcessor.__call__`] method to prepare image(s) for the model, and
|
71 |
+
[`RobertaTokenizerFast.__call__`] to prepare text for the model.
|
72 |
+
|
73 |
+
Please refer to the docstring of the above two methods for more information.
|
74 |
+
"""
|
75 |
+
encoding = self.tokenizer(
|
76 |
+
text=text,
|
77 |
+
add_special_tokens=add_special_tokens,
|
78 |
+
padding=padding,
|
79 |
+
truncation=truncation,
|
80 |
+
max_length=max_length,
|
81 |
+
stride=stride,
|
82 |
+
pad_to_multiple_of=pad_to_multiple_of,
|
83 |
+
return_token_type_ids=return_token_type_ids,
|
84 |
+
return_attention_mask=return_attention_mask,
|
85 |
+
return_overflowing_tokens=return_overflowing_tokens,
|
86 |
+
return_special_tokens_mask=return_special_tokens_mask,
|
87 |
+
return_offsets_mapping=return_offsets_mapping,
|
88 |
+
return_length=return_length,
|
89 |
+
verbose=verbose,
|
90 |
+
return_tensors=return_tensors,
|
91 |
+
**kwargs,
|
92 |
+
)
|
93 |
+
# add pixel_values + pixel_mask
|
94 |
+
encoding_image_processor = self.image_processor(
|
95 |
+
images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
|
96 |
+
)
|
97 |
+
encoding.update(encoding_image_processor)
|
98 |
+
|
99 |
+
return encoding
|
100 |
+
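# A minimal usage sketch (assuming the public "BridgeTower/bridgetower-base" checkpoint shown
# in the model docstrings above):
#
#   from transformers import BridgeTowerProcessor
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   batch = processor(images=image, text="hello world", return_tensors="pt")
#   # batch now contains input_ids / attention_mask from the tokenizer plus
#   # pixel_values / pixel_mask from the image processor.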
|
101 |
+
def batch_decode(self, *args, **kwargs):
|
102 |
+
"""
|
103 |
+
This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
|
104 |
+
refer to the docstring of this method for more information.
|
105 |
+
"""
|
106 |
+
return self.tokenizer.batch_decode(*args, **kwargs)
|
107 |
+
|
108 |
+
def decode(self, *args, **kwargs):
|
109 |
+
"""
|
110 |
+
This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer
|
111 |
+
to the docstring of this method for more information.
|
112 |
+
"""
|
113 |
+
return self.tokenizer.decode(*args, **kwargs)
|
114 |
+
|
115 |
+
@property
|
116 |
+
def model_input_names(self):
|
117 |
+
tokenizer_input_names = self.tokenizer.model_input_names
|
118 |
+
image_processor_input_names = self.image_processor.model_input_names
|
119 |
+
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
|
llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/__init__.py
ADDED
@@ -0,0 +1,88 @@
1 |
+
# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
from typing import TYPE_CHECKING
|
15 |
+
|
16 |
+
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
|
17 |
+
|
18 |
+
|
19 |
+
_import_structure = {
|
20 |
+
"configuration_chinese_clip": [
|
21 |
+
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
|
22 |
+
"ChineseCLIPConfig",
|
23 |
+
"ChineseCLIPOnnxConfig",
|
24 |
+
"ChineseCLIPTextConfig",
|
25 |
+
"ChineseCLIPVisionConfig",
|
26 |
+
],
|
27 |
+
"processing_chinese_clip": ["ChineseCLIPProcessor"],
|
28 |
+
}
|
29 |
+
|
30 |
+
try:
|
31 |
+
if not is_vision_available():
|
32 |
+
raise OptionalDependencyNotAvailable()
|
33 |
+
except OptionalDependencyNotAvailable:
|
34 |
+
pass
|
35 |
+
else:
|
36 |
+
_import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
|
37 |
+
_import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
|
38 |
+
|
39 |
+
try:
|
40 |
+
if not is_torch_available():
|
41 |
+
raise OptionalDependencyNotAvailable()
|
42 |
+
except OptionalDependencyNotAvailable:
|
43 |
+
pass
|
44 |
+
else:
|
45 |
+
_import_structure["modeling_chinese_clip"] = [
|
46 |
+
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
|
47 |
+
"ChineseCLIPModel",
|
48 |
+
"ChineseCLIPPreTrainedModel",
|
49 |
+
"ChineseCLIPTextModel",
|
50 |
+
"ChineseCLIPVisionModel",
|
51 |
+
]
|
52 |
+
|
53 |
+
if TYPE_CHECKING:
|
54 |
+
from .configuration_chinese_clip import (
|
55 |
+
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
|
56 |
+
ChineseCLIPConfig,
|
57 |
+
ChineseCLIPOnnxConfig,
|
58 |
+
ChineseCLIPTextConfig,
|
59 |
+
ChineseCLIPVisionConfig,
|
60 |
+
)
|
61 |
+
from .processing_chinese_clip import ChineseCLIPProcessor
|
62 |
+
|
63 |
+
try:
|
64 |
+
if not is_vision_available():
|
65 |
+
raise OptionalDependencyNotAvailable()
|
66 |
+
except OptionalDependencyNotAvailable:
|
67 |
+
pass
|
68 |
+
else:
|
69 |
+
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
|
70 |
+
|
71 |
+
try:
|
72 |
+
if not is_torch_available():
|
73 |
+
raise OptionalDependencyNotAvailable()
|
74 |
+
except OptionalDependencyNotAvailable:
|
75 |
+
pass
|
76 |
+
else:
|
77 |
+
from .modeling_chinese_clip import (
|
78 |
+
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
|
79 |
+
ChineseCLIPModel,
|
80 |
+
ChineseCLIPPreTrainedModel,
|
81 |
+
ChineseCLIPTextModel,
|
82 |
+
ChineseCLIPVisionModel,
|
83 |
+
)
|
84 |
+
|
85 |
+
else:
|
86 |
+
import sys
|
87 |
+
|
88 |
+
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.49 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/configuration_chinese_clip.cpython-310.pyc
ADDED
Binary file (17.8 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/convert_chinese_clip_original_pytorch_to_hf.cpython-310.pyc
ADDED
Binary file (4.05 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/feature_extraction_chinese_clip.cpython-310.pyc
ADDED
Binary file (1.07 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/image_processing_chinese_clip.cpython-310.pyc
ADDED
Binary file (13.2 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/modeling_chinese_clip.cpython-310.pyc
ADDED
Binary file (48.5 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/__pycache__/processing_chinese_clip.cpython-310.pyc
ADDED
Binary file (6.02 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/configuration_chinese_clip.py
ADDED
@@ -0,0 +1,468 @@
# coding=utf-8
# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Chinese-CLIP model configuration"""

import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class ChineseCLIPTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate a
    Chinese CLIP model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Chinese CLIP
    [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the CHINESE_CLIP model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`ChineseCLIPModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`ChineseCLIPModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.

    Example:

    ```python
    >>> from transformers import ChineseCLIPTextConfig, ChineseCLIPTextModel

    >>> # Initializing a ChineseCLIPTextConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration
    >>> configuration = ChineseCLIPTextConfig()

    >>> # Initializing a ChineseCLIPTextModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration
    >>> model = ChineseCLIPTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "chinese_clip_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        initializer_factor=1.0,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from ChineseCLIPConfig
        if config_dict.get("model_type") == "chinese_clip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class ChineseCLIPVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate a
    ChineseCLIP model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the ChineseCLIP
    [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
    Example:
    ```python
    >>> from transformers import ChineseCLIPVisionConfig, ChineseCLIPVisionModel

    >>> # Initializing a ChineseCLIPVisionConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration
    >>> configuration = ChineseCLIPVisionConfig()

    >>> # Initializing a ChineseCLIPVisionModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration
    >>> model = ChineseCLIPVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "chinese_clip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from ChineseCLIPConfig
        if config_dict.get("model_type") == "chinese_clip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class ChineseCLIPConfig(PretrainedConfig):
    r"""
    [`ChineseCLIPConfig`] is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used
    to instantiate a Chinese-CLIP model according to the specified arguments, defining the text model and vision model
    configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the
    Chinese-CLIP [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`ChineseCLIPTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`ChineseCLIPVisionConfig`].
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. Default is used as per the original ChineseCLIP
            implementation.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import ChineseCLIPConfig, ChineseCLIPModel

    >>> # Initializing a ChineseCLIPConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration
    >>> configuration = ChineseCLIPConfig()

    >>> # Initializing a ChineseCLIPModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration
    >>> model = ChineseCLIPModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a ChineseCLIPConfig from a ChineseCLIPTextConfig and a ChineseCLIPVisionConfig

    >>> # Initializing a ChineseCLIPTextConfig and ChineseCLIPVisionConfig configuration
    >>> config_text = ChineseCLIPTextConfig()
    >>> config_vision = ChineseCLIPVisionConfig()

    >>> config = ChineseCLIPConfig.from_text_vision_configs(config_text, config_vision)
    ```"""

    model_type = "chinese_clip"

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = ChineseCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if a value exists in both `_text_config_dict` and `text_config` but differs.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `ChineseCLIPTextConfig`. "
                            f'The value `text_config["{key}"]` will be overridden.'
                        )
                    logger.info(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = ChineseCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if a value exists in both `_vision_config_dict` and `vision_config` but differs.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize "
                            f'`ChineseCLIPVisionConfig`. The value `vision_config["{key}"]` will be overridden.'
                        )
                    logger.info(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `ChineseCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `ChineseCLIPVisionConfig` with default values.")

        self.text_config = ChineseCLIPTextConfig(**text_config)
        self.vision_config = ChineseCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_text_vision_configs(
        cls, text_config: ChineseCLIPTextConfig, vision_config: ChineseCLIPVisionConfig, **kwargs
    ):
        r"""
        Instantiate a [`ChineseCLIPConfig`] (or a derived class) from Chinese-CLIP text model configuration and
        Chinese-CLIP vision model configuration. Returns:
            [`ChineseCLIPConfig`]: An instance of a configuration object
        """

        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)


class ChineseCLIPOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
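To illustrate the composition pattern this file implements, here is a short sketch of building a `ChineseCLIPConfig` from separately customized text and vision configs via `from_text_vision_configs`; the hyper-parameter values below are arbitrary illustrations, not recommended settings:

```python
from transformers import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig

# Customize the two towers independently; unspecified fields keep the defaults documented above.
text_config = ChineseCLIPTextConfig(hidden_size=768, num_hidden_layers=6)  # example values
vision_config = ChineseCLIPVisionConfig(image_size=224, patch_size=16)  # example values

# Compose them into a full model config; extra kwargs are forwarded to ChineseCLIPConfig.__init__.
config = ChineseCLIPConfig.from_text_vision_configs(text_config, vision_config, projection_dim=512)
print(config.text_config.num_hidden_layers, config.vision_config.patch_size)  # 6 16
```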
llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py
ADDED
@@ -0,0 +1,134 @@
# coding=utf-8
# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

import torch

from transformers import ChineseCLIPConfig, ChineseCLIPModel


def copy_attn_layer(hf_attn_layer, pt_weights, prefix):
    q_proj, k_proj, v_proj = pt_weights[f"{prefix}.in_proj_weight"].chunk(3, dim=0)
    q_proj_bias, k_proj_bias, v_proj_bias = pt_weights[f"{prefix}.in_proj_bias"].chunk(3, dim=0)

    out_proj_weights = pt_weights[f"{prefix}.out_proj.weight"]
    out_proj_bias = pt_weights[f"{prefix}.out_proj.bias"]

    hf_attn_layer.q_proj.weight.data = q_proj
    hf_attn_layer.q_proj.bias.data = q_proj_bias

    hf_attn_layer.k_proj.weight.data = k_proj
    hf_attn_layer.k_proj.bias.data = k_proj_bias

    hf_attn_layer.v_proj.weight.data = v_proj
    hf_attn_layer.v_proj.bias.data = v_proj_bias

    hf_attn_layer.out_proj.weight.data = out_proj_weights
    hf_attn_layer.out_proj.bias.data = out_proj_bias


def copy_mlp(hf_mlp, pt_weights, prefix):
    copy_linear(hf_mlp.fc1, pt_weights, f"{prefix}.c_fc")
    copy_linear(hf_mlp.fc2, pt_weights, f"{prefix}.c_proj")


def copy_linear(hf_linear, pt_weights, prefix):
    hf_linear.weight.data = pt_weights[f"{prefix}.weight"].data
    hf_linear.bias.data = pt_weights[f"{prefix}.bias"].data


def copy_layer(hf_layer, pt_weights, prefix):
    # copy layer norms
    copy_linear(hf_layer.layer_norm1, pt_weights, f"{prefix}.ln_1")
    copy_linear(hf_layer.layer_norm2, pt_weights, f"{prefix}.ln_2")

    # copy MLP
    copy_mlp(hf_layer.mlp, pt_weights, f"{prefix}.mlp")

    # copy attn
    copy_attn_layer(hf_layer.self_attn, pt_weights, f"{prefix}.attn")


def copy_layers(hf_layers, pt_weights, prefix):
    for layer_id, hf_layer in enumerate(hf_layers):
        copy_layer(hf_layer, pt_weights, f"{prefix}.{layer_id}")


def copy_text_model_and_projection(hf_model, pt_weights):
    # copy projection
    hf_model.text_projection.weight.data = pt_weights["text_projection"].data.T

    # copy text encoder
    for name, param in hf_model.text_model.named_parameters():
        param.data = pt_weights[f"bert.{name}"].data


def copy_vision_model_and_projection(hf_model, pt_weights):
    # copy projection
    hf_model.visual_projection.weight.data = pt_weights["visual.proj"].data.T

    # copy layer norms
    copy_linear(hf_model.vision_model.pre_layrnorm, pt_weights, "visual.ln_pre")
    copy_linear(hf_model.vision_model.post_layernorm, pt_weights, "visual.ln_post")

    # copy embeddings
    hf_model.vision_model.embeddings.patch_embedding.weight.data = pt_weights["visual.conv1.weight"].data
    hf_model.vision_model.embeddings.class_embedding.data = pt_weights["visual.class_embedding"].data
    hf_model.vision_model.embeddings.position_embedding.weight.data = pt_weights["visual.positional_embedding"].data

    # copy encoder
    copy_layers(hf_model.vision_model.encoder.layers, pt_weights, "visual.transformer.resblocks")


@torch.no_grad()
def convert_chinese_clip_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak model's weights to transformers design.
    """

    assert config_path is not None, "Please specify the ChineseCLIP model config of the corresponding model size."
    config = ChineseCLIPConfig.from_pretrained(config_path)

    hf_model = ChineseCLIPModel(config).eval()

    pt_weights = torch.load(checkpoint_path, map_location="cpu")["state_dict"]
    pt_weights = {(name[7:] if name.startswith("module.") else name): value for name, value in pt_weights.items()}

    copy_text_model_and_projection(hf_model, pt_weights)
    copy_vision_model_and_projection(hf_model, pt_weights)
    hf_model.logit_scale.data = pt_weights["logit_scale"].data

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the output folder storing converted hf PyTorch model.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to original github format ChineseCLIP checkpoint."
    )
    parser.add_argument(
        "--config_path", default=None, required=True, type=str, help="Path to hf config.json of model to convert."
    )
    args = parser.parse_args()

    convert_chinese_clip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
    print("The conversion is finished!")
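For reference, the converter above can also be driven programmatically rather than through the CLI; a hedged sketch, where every path below is a placeholder for a locally downloaded original Chinese-CLIP checkpoint and an output directory:

```python
# Sketch: calling the conversion function directly instead of via `python -m ... --args`.
# All paths are placeholders; the config must match the checkpoint's model size.
from transformers.models.chinese_clip.convert_chinese_clip_original_pytorch_to_hf import (
    convert_chinese_clip_checkpoint,
)

convert_chinese_clip_checkpoint(
    checkpoint_path="clip_cn_vit-b-16.pt",  # placeholder: original OFA-Sys checkpoint file
    pytorch_dump_folder_path="./chinese-clip-hf",  # placeholder: output folder for save_pretrained
    config_path="OFA-Sys/chinese-clip-vit-base-patch16",  # HF config matching the checkpoint size
)
```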
llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/feature_extraction_chinese_clip.py
ADDED
@@ -0,0 +1,33 @@
# coding=utf-8
# Copyright 2021 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature extractor class for Chinese-CLIP."""

import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
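Since the class above is only a deprecation shim, instantiating it behaves like `ChineseCLIPImageProcessor` while emitting a `FutureWarning`; a small sketch of what callers observe (assuming the vision backend is installed):

```python
import warnings

from transformers import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    feature_extractor = ChineseCLIPFeatureExtractor()  # same behaviour as the image processor

# The shim is a subclass, and the deprecation warning was raised on construction.
assert isinstance(feature_extractor, ChineseCLIPImageProcessor)
assert any(issubclass(w.category, FutureWarning) for w in caught)
```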
llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/image_processing_chinese_clip.py
ADDED
@@ -0,0 +1,331 @@
# coding=utf-8
# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for Chinese-CLIP."""

from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    convert_to_rgb,
    get_resize_output_image_size,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    infer_channel_dimension_format,
    is_scaled_image,
    make_list_of_images,
    to_numpy_array,
    valid_images,
    validate_kwargs,
    validate_preprocess_arguments,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class ChineseCLIPImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Chinese-CLIP image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
            `do_resize` in the `preprocess` method.
        size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
            Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
            the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
            method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
            `preprocess` method.
        crop_size (`Dict[str, int]` *optional*, defaults to 224):
            Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
            method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
            the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
            method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
        self._valid_processor_keys = [
            "images",
            "do_resize",
            "size",
            "resample",
            "do_center_crop",
            "crop_size",
            "do_rescale",
            "rescale_factor",
            "do_normalize",
            "image_mean",
            "image_std",
            "do_convert_rgb",
            "return_tensors",
            "data_format",
            "input_data_format",
        ]

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
        resized to keep the input aspect ratio.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred from the input
                image.
        """
        size = get_size_dict(size, default_to_square=False)
        output_size = get_resize_output_image_size(
            image, size=(size["height"], size["width"]), default_to_square=False, input_data_format=input_data_format
        )
        return resize(
            image,
            size=output_size,
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> PIL.Image.Image:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
                the longest edge resized to keep the input aspect ratio.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image.
            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
                `True`.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        if do_resize:
            images = [
                self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
                for image in images
            ]

        if do_center_crop:
            images = [
                self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
            ]

        if do_rescale:
            images = [
                self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
                for image in images
            ]

        if do_normalize:
            images = [
                self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
                for image in images
            ]

        images = [
            to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
        ]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
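As a hedged usage sketch of the pipeline implemented in `preprocess` above (a random uint8 array stands in for a real photo; `size` is passed explicitly with `height`/`width` keys because the `resize` method above reads those keys from the size dict):

```python
import numpy as np

from transformers import ChineseCLIPImageProcessor

# Resize to 224x224, center-crop 224x224, rescale to [0, 1], normalize with CLIP mean/std.
image_processor = ChineseCLIPImageProcessor(size={"height": 224, "width": 224})

# A dummy RGB image in (height, width, channels) layout with 0-255 pixel values.
dummy_image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)

batch = image_processor.preprocess(dummy_image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224): channels-first after preprocessing
```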
llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/modeling_chinese_clip.py
ADDED
@@ -0,0 +1,1562 @@
# coding=utf-8
# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Chinese-CLIP model."""


import math
from dataclasses import dataclass
from typing import Any, List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPooling,
    BaseModelOutputWithPoolingAndCrossAttentions,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "OFA-Sys/chinese-clip-vit-base-patch16"
_CONFIG_FOR_DOC = "ChineseCLIPConfig"


from ..deprecated._archive_maps import CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
# Copied from transformers.models.clip.modeling_clip.contrastive_loss
def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))


def chinese_clip_loss(similarity: torch.Tensor) -> torch.Tensor:
    caption_loss = contrastive_loss(similarity)
    image_loss = contrastive_loss(similarity.t())
    return (caption_loss + image_loss) / 2.0


@dataclass
class ChineseCLIPOutput(ModelOutput):
    """
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Contrastive loss for image-text similarity.
        logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
            The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
            similarity scores.
        logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
            The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
            similarity scores.
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The text embeddings obtained by applying the projection layer to the pooled output of
            [`ChineseCLIPTextModel`].
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The image embeddings obtained by applying the projection layer to the pooled output of
            [`ChineseCLIPVisionModel`].
        text_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`):
            The output of the [`ChineseCLIPTextModel`].
        vision_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`):
            The output of the [`ChineseCLIPVisionModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    logits_per_image: torch.FloatTensor = None
    logits_per_text: torch.FloatTensor = None
    text_embeds: torch.FloatTensor = None
    image_embeds: torch.FloatTensor = None
    text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None
    vision_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None

    def to_tuple(self) -> Tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )


# Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->ChineseCLIPText
class ChineseCLIPTextEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        self.register_buffer(
            "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values_length: int = 0,
    ) -> torch.Tensor:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        # Setting the token_type_ids to the registered buffer in the constructor where it is all zeros, which usually
        # occurs when it's auto-generated; the registered buffer helps users when tracing the model without passing
        # token_type_ids, solves issue #5664
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


# Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->ChineseCLIP
class ChineseCLIPVisionEmbeddings(nn.Module):
    def __init__(self, config: ChineseCLIPVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            bias=False,
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        batch_size = pixel_values.shape[0]
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))  # shape = [*, width, grid, grid]
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        embeddings = embeddings + self.position_embedding(self.position_ids)
        return embeddings


# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ChineseCLIPText
class ChineseCLIPTextSelfAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        use_cache = past_key_value is not None
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            query_length, key_length = query_layer.shape[2], key_layer.shape[2]
            if use_cache:
                position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
                    -1, 1
                )
            else:
                position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r

            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the ChineseCLIPTextModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs


# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->ChineseCLIPText
class ChineseCLIPTextSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->ChineseCLIPText
class ChineseCLIPTextAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        self.self = ChineseCLIPTextSelfAttention(config, position_embedding_type=position_embedding_type)
        self.output = ChineseCLIPTextSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class ChineseCLIPVisionAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        bsz, tgt_len, embed_dim = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scale
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped


# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->ChineseCLIPText
class ChineseCLIPTextIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->ChineseCLIPText
class ChineseCLIPTextOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->ChineseCLIPVision
class ChineseCLIPVisionMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->ChineseCLIPText
class ChineseCLIPTextLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = ChineseCLIPTextAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            self.crossattention = ChineseCLIPTextAttention(config, position_embedding_type="absolute")
        self.intermediate = ChineseCLIPTextIntermediate(config)
        self.output = ChineseCLIPTextOutput(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
                    " by setting `config.add_cross_attention=True`"
                )

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output


class ChineseCLIPVisionLayer(nn.Module):
    def __init__(self, config: ChineseCLIPConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = ChineseCLIPVisionAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = ChineseCLIPVisionMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->ChineseCLIPText
class ChineseCLIPTextPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class ChineseCLIPPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ChineseCLIPConfig
    base_model_prefix = "chinese_clip"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        factor = self.config.initializer_factor
        if isinstance(module, ChineseCLIPVisionEmbeddings):
            factor = self.config.initializer_factor
            nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
            nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
            nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
        elif isinstance(module, ChineseCLIPTextEmbeddings):
            nn.init.normal_(module.word_embeddings.weight, mean=0.0, std=self.config.initializer_range)
            nn.init.normal_(module.position_embeddings.weight, mean=0.0, std=self.config.initializer_range)
            nn.init.normal_(module.token_type_embeddings.weight, mean=0.0, std=self.config.initializer_range)
            for embedding in [module.word_embeddings, module.position_embeddings, module.token_type_embeddings]:
                if embedding.padding_idx is not None:
                    embedding.weight.data[embedding.padding_idx].zero_()
        elif isinstance(module, ChineseCLIPVisionAttention):
            factor = self.config.initializer_factor
            in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            out_proj_std = (module.embed_dim**-0.5) * factor
            nn.init.normal_(module.q_proj.weight, std=in_proj_std)
            nn.init.normal_(module.k_proj.weight, std=in_proj_std)
            nn.init.normal_(module.v_proj.weight, std=in_proj_std)
            nn.init.normal_(module.out_proj.weight, std=out_proj_std)
        elif isinstance(module, ChineseCLIPVisionMLP):
            factor = self.config.initializer_factor
            in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
            nn.init.normal_(module.fc1.weight, std=fc_std)
            nn.init.normal_(module.fc2.weight, std=in_proj_std)
        elif isinstance(module, ChineseCLIPModel):
            nn.init.normal_(
                module.text_projection.weight,
                std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
            )
            nn.init.normal_(
                module.visual_projection.weight,
                std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
            )

        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()


CHINESE_CLIP_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ChineseCLIPConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

CHINESE_CLIP_TEXT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

CHINESE_CLIP_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

CHINESE_CLIP_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details.
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->ChineseCLIPText
class ChineseCLIPTextEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([ChineseCLIPTextLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


class ChineseCLIPVisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`ChineseCLIPVisionEncoderLayer`].

    Args:
        config: ChineseCLIPConfig
    """

    def __init__(self, config: ChineseCLIPConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([ChineseCLIPVisionLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    encoder_layer.__call__,
                    hidden_states,
                    output_attentions,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )


class ChineseCLIPVisionTransformer(nn.Module):
    def __init__(self, config: ChineseCLIPVisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = ChineseCLIPVisionEmbeddings(config)
        self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.encoder = ChineseCLIPVisionEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.embeddings(pixel_values)
        hidden_states = self.pre_layrnorm(hidden_states)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@add_start_docstrings(
    "The text model from CHINESE_CLIP without any head or projection on top.",
    CHINESE_CLIP_START_DOCSTRING,
)
class ChineseCLIPTextModel(ChineseCLIPPreTrainedModel):
    """

    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in [Attention is
    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
    """

    config_class = ChineseCLIPTextConfig

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = ChineseCLIPTextEmbeddings(config)
        self.encoder = ChineseCLIPTextEncoder(config)

        self.pooler = ChineseCLIPTextPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        r"""
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
|
1175 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
1176 |
+
`past_key_values`).
|
1177 |
+
"""
|
1178 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
1179 |
+
output_hidden_states = (
|
1180 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1181 |
+
)
|
1182 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1183 |
+
|
1184 |
+
if self.config.is_decoder:
|
1185 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
1186 |
+
else:
|
1187 |
+
use_cache = False
|
1188 |
+
|
1189 |
+
if input_ids is not None and inputs_embeds is not None:
|
1190 |
+
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
1191 |
+
elif input_ids is not None:
|
1192 |
+
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
|
1193 |
+
input_shape = input_ids.size()
|
1194 |
+
elif inputs_embeds is not None:
|
1195 |
+
input_shape = inputs_embeds.size()[:-1]
|
1196 |
+
else:
|
1197 |
+
raise ValueError("You have to specify either input_ids or inputs_embeds")
|
1198 |
+
|
1199 |
+
batch_size, seq_length = input_shape
|
1200 |
+
device = input_ids.device if input_ids is not None else inputs_embeds.device
|
1201 |
+
|
1202 |
+
# past_key_values_length
|
1203 |
+
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
|
1204 |
+
|
1205 |
+
if attention_mask is None:
|
1206 |
+
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
|
1207 |
+
|
1208 |
+
if token_type_ids is None:
|
1209 |
+
if hasattr(self.embeddings, "token_type_ids"):
|
1210 |
+
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
|
1211 |
+
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
|
1212 |
+
token_type_ids = buffered_token_type_ids_expanded
|
1213 |
+
else:
|
1214 |
+
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
|
1215 |
+
|
1216 |
+
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
|
1217 |
+
# ourselves in which case we just need to make it broadcastable to all heads.
|
1218 |
+
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
|
1219 |
+
|
1220 |
+
# If a 2D or 3D attention mask is provided for the cross-attention
|
1221 |
+
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
|
1222 |
+
if self.config.is_decoder and encoder_hidden_states is not None:
|
1223 |
+
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
|
1224 |
+
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
|
1225 |
+
if encoder_attention_mask is None:
|
1226 |
+
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
|
1227 |
+
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
|
1228 |
+
else:
|
1229 |
+
encoder_extended_attention_mask = None
|
1230 |
+
|
1231 |
+
# Prepare head mask if needed
|
1232 |
+
# 1.0 in head_mask indicate we keep the head
|
1233 |
+
# attention_probs has shape bsz x n_heads x N x N
|
1234 |
+
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
|
1235 |
+
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
|
1236 |
+
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
1237 |
+
|
1238 |
+
embedding_output = self.embeddings(
|
1239 |
+
input_ids=input_ids,
|
1240 |
+
position_ids=position_ids,
|
1241 |
+
token_type_ids=token_type_ids,
|
1242 |
+
inputs_embeds=inputs_embeds,
|
1243 |
+
past_key_values_length=past_key_values_length,
|
1244 |
+
)
|
1245 |
+
encoder_outputs = self.encoder(
|
1246 |
+
embedding_output,
|
1247 |
+
attention_mask=extended_attention_mask,
|
1248 |
+
head_mask=head_mask,
|
1249 |
+
encoder_hidden_states=encoder_hidden_states,
|
1250 |
+
encoder_attention_mask=encoder_extended_attention_mask,
|
1251 |
+
past_key_values=past_key_values,
|
1252 |
+
use_cache=use_cache,
|
1253 |
+
output_attentions=output_attentions,
|
1254 |
+
output_hidden_states=output_hidden_states,
|
1255 |
+
return_dict=return_dict,
|
1256 |
+
)
|
1257 |
+
sequence_output = encoder_outputs[0]
|
1258 |
+
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
|
1259 |
+
|
1260 |
+
if not return_dict:
|
1261 |
+
return (sequence_output, pooled_output) + encoder_outputs[1:]
|
1262 |
+
|
1263 |
+
return BaseModelOutputWithPoolingAndCrossAttentions(
|
1264 |
+
last_hidden_state=sequence_output,
|
1265 |
+
pooler_output=pooled_output,
|
1266 |
+
past_key_values=encoder_outputs.past_key_values,
|
1267 |
+
hidden_states=encoder_outputs.hidden_states,
|
1268 |
+
attentions=encoder_outputs.attentions,
|
1269 |
+
cross_attentions=encoder_outputs.cross_attentions,
|
1270 |
+
)
|
1271 |
+
|
1272 |
+
|
1273 |
+
@add_start_docstrings(
|
1274 |
+
"""The vision model from CHINESE_CLIP without any head or projection on top.""",
|
1275 |
+
CHINESE_CLIP_START_DOCSTRING,
|
1276 |
+
)
|
1277 |
+
class ChineseCLIPVisionModel(ChineseCLIPPreTrainedModel):
|
1278 |
+
config_class = ChineseCLIPVisionConfig
|
1279 |
+
main_input_name = "pixel_values"
|
1280 |
+
|
1281 |
+
def __init__(self, config: ChineseCLIPVisionConfig):
|
1282 |
+
super().__init__(config)
|
1283 |
+
self.vision_model = ChineseCLIPVisionTransformer(config)
|
1284 |
+
# Initialize weights and apply final processing
|
1285 |
+
self.post_init()
|
1286 |
+
|
1287 |
+
def get_input_embeddings(self) -> nn.Module:
|
1288 |
+
return self.vision_model.embeddings.patch_embedding
|
1289 |
+
|
1290 |
+
@add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING)
|
1291 |
+
@replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig)
|
1292 |
+
def forward(
|
1293 |
+
self,
|
1294 |
+
pixel_values: Optional[torch.FloatTensor] = None,
|
1295 |
+
output_attentions: Optional[bool] = None,
|
1296 |
+
output_hidden_states: Optional[bool] = None,
|
1297 |
+
return_dict: Optional[bool] = None,
|
1298 |
+
) -> Union[Tuple, BaseModelOutputWithPooling]:
|
1299 |
+
r"""
|
1300 |
+
Returns:
|
1301 |
+
|
1302 |
+
Examples:
|
1303 |
+
|
1304 |
+
```python
|
1305 |
+
>>> from PIL import Image
|
1306 |
+
>>> import requests
|
1307 |
+
>>> from transformers import CLIPProcessor, ChineseCLIPVisionModel
|
1308 |
+
|
1309 |
+
>>> model = ChineseCLIPVisionModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
|
1310 |
+
>>> processor = CLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
|
1311 |
+
|
1312 |
+
>>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
|
1313 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
1314 |
+
|
1315 |
+
>>> inputs = processor(images=image, return_tensors="pt")
|
1316 |
+
|
1317 |
+
>>> outputs = model(**inputs)
|
1318 |
+
>>> last_hidden_state = outputs.last_hidden_state
|
1319 |
+
>>> pooled_output = outputs.pooler_output # pooled CLS states
|
1320 |
+
```"""
|
1321 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1322 |
+
|
1323 |
+
return self.vision_model(
|
1324 |
+
pixel_values=pixel_values,
|
1325 |
+
output_attentions=output_attentions,
|
1326 |
+
output_hidden_states=output_hidden_states,
|
1327 |
+
return_dict=return_dict,
|
1328 |
+
)
|
1329 |
+
|
1330 |
+
|
1331 |
+
@add_start_docstrings(CHINESE_CLIP_START_DOCSTRING)
|
1332 |
+
class ChineseCLIPModel(ChineseCLIPPreTrainedModel):
|
1333 |
+
config_class = ChineseCLIPConfig
|
1334 |
+
|
1335 |
+
def __init__(self, config: ChineseCLIPConfig):
|
1336 |
+
super().__init__(config)
|
1337 |
+
|
1338 |
+
if not isinstance(config.text_config, ChineseCLIPTextConfig):
|
1339 |
+
raise ValueError(
|
1340 |
+
"config.text_config is expected to be of type ChineseCLIPTextConfig but is of type"
|
1341 |
+
f" {type(config.text_config)}."
|
1342 |
+
)
|
1343 |
+
|
1344 |
+
if not isinstance(config.vision_config, ChineseCLIPVisionConfig):
|
1345 |
+
raise ValueError(
|
1346 |
+
"config.vision_config is expected to be of type ChineseCLIPVisionConfig but is of type"
|
1347 |
+
f" {type(config.vision_config)}."
|
1348 |
+
)
|
1349 |
+
|
1350 |
+
text_config = config.text_config
|
1351 |
+
vision_config = config.vision_config
|
1352 |
+
|
1353 |
+
self.projection_dim = config.projection_dim
|
1354 |
+
self.text_embed_dim = text_config.hidden_size
|
1355 |
+
self.vision_embed_dim = vision_config.hidden_size
|
1356 |
+
|
1357 |
+
self.text_model = ChineseCLIPTextModel(text_config, add_pooling_layer=False)
|
1358 |
+
self.vision_model = ChineseCLIPVisionTransformer(vision_config)
|
1359 |
+
|
1360 |
+
self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
|
1361 |
+
self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
|
1362 |
+
self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
|
1363 |
+
|
1364 |
+
# Initialize weights and apply final processing
|
1365 |
+
self.post_init()
|
1366 |
+
|
1367 |
+
@add_start_docstrings_to_model_forward(CHINESE_CLIP_TEXT_INPUTS_DOCSTRING)
|
1368 |
+
def get_text_features(
|
1369 |
+
self,
|
1370 |
+
input_ids: Optional[torch.Tensor] = None,
|
1371 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1372 |
+
token_type_ids: Optional[torch.Tensor] = None,
|
1373 |
+
position_ids: Optional[torch.Tensor] = None,
|
1374 |
+
output_attentions: Optional[bool] = None,
|
1375 |
+
output_hidden_states: Optional[bool] = None,
|
1376 |
+
return_dict: Optional[bool] = None,
|
1377 |
+
) -> torch.FloatTensor:
|
1378 |
+
r"""
|
1379 |
+
Returns:
|
1380 |
+
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
|
1381 |
+
applying the projection layer to the final [CLS] hidden state of Text-Transformer.
|
1382 |
+
|
1383 |
+
Examples:
|
1384 |
+
|
1385 |
+
```python
|
1386 |
+
>>> from transformers import AutoTokenizer, ChineseCLIPModel
|
1387 |
+
|
1388 |
+
>>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
|
1389 |
+
>>> tokenizer = AutoTokenizer.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
|
1390 |
+
|
1391 |
+
>>> inputs = tokenizer(["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], padding=True, return_tensors="pt")
|
1392 |
+
>>> text_features = model.get_text_features(**inputs)
|
1393 |
+
>>> text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True)
|
1394 |
+
```"""
|
1395 |
+
# Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components.
|
1396 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
1397 |
+
output_hidden_states = (
|
1398 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1399 |
+
)
|
1400 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1401 |
+
|
1402 |
+
text_outputs = self.text_model(
|
1403 |
+
input_ids=input_ids,
|
1404 |
+
attention_mask=attention_mask,
|
1405 |
+
token_type_ids=token_type_ids,
|
1406 |
+
position_ids=position_ids,
|
1407 |
+
output_attentions=output_attentions,
|
1408 |
+
output_hidden_states=output_hidden_states,
|
1409 |
+
return_dict=return_dict,
|
1410 |
+
)
|
1411 |
+
|
1412 |
+
pooled_output = text_outputs[0][:, 0, :]
|
1413 |
+
text_features = self.text_projection(pooled_output)
|
1414 |
+
|
1415 |
+
return text_features
|
1416 |
+
|
1417 |
+
@add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING)
|
1418 |
+
def get_image_features(
|
1419 |
+
self,
|
1420 |
+
pixel_values: Optional[torch.FloatTensor] = None,
|
1421 |
+
output_attentions: Optional[bool] = None,
|
1422 |
+
output_hidden_states: Optional[bool] = None,
|
1423 |
+
return_dict: Optional[bool] = None,
|
1424 |
+
) -> torch.FloatTensor:
|
1425 |
+
r"""
|
1426 |
+
Returns:
|
1427 |
+
image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
|
1428 |
+
applying the projection layer to the final [CLS] hidden state of Vision-Transformer.
|
1429 |
+
|
1430 |
+
Examples:
|
1431 |
+
|
1432 |
+
```python
|
1433 |
+
>>> from PIL import Image
|
1434 |
+
>>> import requests
|
1435 |
+
>>> from transformers import AutoProcessor, ChineseCLIPModel
|
1436 |
+
|
1437 |
+
>>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
|
1438 |
+
>>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
|
1439 |
+
|
1440 |
+
>>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
|
1441 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
1442 |
+
|
1443 |
+
>>> inputs = processor(images=image, return_tensors="pt")
|
1444 |
+
|
1445 |
+
>>> image_features = model.get_image_features(**inputs)
|
1446 |
+
>>> image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)
|
1447 |
+
```"""
|
1448 |
+
# Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components.
|
1449 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
1450 |
+
output_hidden_states = (
|
1451 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1452 |
+
)
|
1453 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1454 |
+
|
1455 |
+
vision_outputs = self.vision_model(
|
1456 |
+
pixel_values=pixel_values,
|
1457 |
+
output_attentions=output_attentions,
|
1458 |
+
output_hidden_states=output_hidden_states,
|
1459 |
+
return_dict=return_dict,
|
1460 |
+
)
|
1461 |
+
|
1462 |
+
pooled_output = vision_outputs[1] # pooled_output
|
1463 |
+
image_features = self.visual_projection(pooled_output)
|
1464 |
+
|
1465 |
+
return image_features
|
1466 |
+
|
1467 |
+
@add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING)
|
1468 |
+
@replace_return_docstrings(output_type=ChineseCLIPOutput, config_class=ChineseCLIPConfig)
|
1469 |
+
def forward(
|
1470 |
+
self,
|
1471 |
+
input_ids: Optional[torch.LongTensor] = None,
|
1472 |
+
pixel_values: Optional[torch.FloatTensor] = None,
|
1473 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1474 |
+
token_type_ids: Optional[torch.Tensor] = None,
|
1475 |
+
position_ids: Optional[torch.LongTensor] = None,
|
1476 |
+
return_loss: Optional[bool] = None,
|
1477 |
+
output_attentions: Optional[bool] = None,
|
1478 |
+
output_hidden_states: Optional[bool] = None,
|
1479 |
+
return_dict: Optional[bool] = None,
|
1480 |
+
) -> Union[Tuple, ChineseCLIPOutput]:
|
1481 |
+
r"""
|
1482 |
+
Returns:
|
1483 |
+
|
1484 |
+
Examples:
|
1485 |
+
|
1486 |
+
```python
|
1487 |
+
>>> from PIL import Image
|
1488 |
+
>>> import requests
|
1489 |
+
>>> from transformers import AutoProcessor, ChineseCLIPModel
|
1490 |
+
|
1491 |
+
>>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
|
1492 |
+
>>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
|
1493 |
+
|
1494 |
+
>>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
|
1495 |
+
>>> image = Image.open(requests.get(url, stream=True).raw)
|
1496 |
+
|
1497 |
+
>>> inputs = processor(text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, return_tensors="pt", padding=True)
|
1498 |
+
|
1499 |
+
>>> outputs = model(**inputs)
|
1500 |
+
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
|
1501 |
+
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
|
1502 |
+
```"""
|
1503 |
+
# Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components.
|
1504 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
1505 |
+
output_hidden_states = (
|
1506 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1507 |
+
)
|
1508 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1509 |
+
|
1510 |
+
vision_outputs = self.vision_model(
|
1511 |
+
pixel_values=pixel_values,
|
1512 |
+
output_attentions=output_attentions,
|
1513 |
+
output_hidden_states=output_hidden_states,
|
1514 |
+
return_dict=return_dict,
|
1515 |
+
)
|
1516 |
+
|
1517 |
+
text_outputs = self.text_model(
|
1518 |
+
input_ids=input_ids,
|
1519 |
+
attention_mask=attention_mask,
|
1520 |
+
token_type_ids=token_type_ids,
|
1521 |
+
position_ids=position_ids,
|
1522 |
+
output_attentions=output_attentions,
|
1523 |
+
output_hidden_states=output_hidden_states,
|
1524 |
+
return_dict=return_dict,
|
1525 |
+
)
|
1526 |
+
|
1527 |
+
image_embeds = vision_outputs[1]
|
1528 |
+
image_embeds = self.visual_projection(image_embeds)
|
1529 |
+
|
1530 |
+
text_embeds = text_outputs[0][:, 0, :]
|
1531 |
+
text_embeds = self.text_projection(text_embeds)
|
1532 |
+
|
1533 |
+
# normalized features
|
1534 |
+
image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
|
1535 |
+
text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
|
1536 |
+
|
1537 |
+
# cosine similarity as logits
|
1538 |
+
logit_scale = self.logit_scale.exp()
|
1539 |
+
logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
|
1540 |
+
logits_per_image = logits_per_text.t()
|
1541 |
+
|
1542 |
+
loss = None
|
1543 |
+
if return_loss:
|
1544 |
+
loss = chinese_clip_loss(logits_per_text)
|
1545 |
+
|
1546 |
+
if not return_dict:
|
1547 |
+
# fix the None pooled_output of text_outputs to conform with dict_output
|
1548 |
+
pooled_output = text_outputs[1]
|
1549 |
+
if pooled_output is None:
|
1550 |
+
text_outputs = (text_outputs[0],) + text_outputs[2:]
|
1551 |
+
output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
|
1552 |
+
return ((loss,) + output) if loss is not None else output
|
1553 |
+
|
1554 |
+
return ChineseCLIPOutput(
|
1555 |
+
loss=loss,
|
1556 |
+
logits_per_image=logits_per_image,
|
1557 |
+
logits_per_text=logits_per_text,
|
1558 |
+
text_embeds=text_embeds,
|
1559 |
+
image_embeds=image_embeds,
|
1560 |
+
text_model_output=text_outputs,
|
1561 |
+
vision_model_output=vision_outputs,
|
1562 |
+
)
|
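Note: `chinese_clip_loss`, called in `forward` when `return_loss=True`, is defined earlier in modeling_chinese_clip.py and is not part of this excerpt. As a hedged sketch of the standard CLIP-style symmetric cross-entropy such a loss computes (the function name below is illustrative, not copied from the file):

```python
import torch
import torch.nn.functional as F


def clip_style_contrastive_loss(logits_per_text: torch.Tensor) -> torch.Tensor:
    # logits_per_text has shape (num_texts, num_images); the i-th text matches the i-th image,
    # so the classification target in both directions is simply the diagonal index.
    targets = torch.arange(logits_per_text.size(0), device=logits_per_text.device)
    caption_loss = F.cross_entropy(logits_per_text, targets)
    image_loss = F.cross_entropy(logits_per_text.t(), targets)
    return (caption_loss + image_loss) / 2.0
```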
llmeval-env/lib/python3.10/site-packages/transformers/models/chinese_clip/processing_chinese_clip.py
ADDED
@@ -0,0 +1,141 @@
# coding=utf-8
# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image/Text processor class for Chinese-CLIP
"""

import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):
    r"""
    Constructs a Chinese-CLIP processor which wraps a Chinese-CLIP image processor and a Chinese-CLIP tokenizer into a
    single processor.

    [`ChineseCLIPProcessor`] offers all the functionalities of [`ChineseCLIPImageProcessor`] and [`BertTokenizerFast`].
    See the [`~ChineseCLIPProcessor.__call__`] and [`~ChineseCLIPProcessor.decode`] for more information.

    Args:
        image_processor ([`ChineseCLIPImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`BertTokenizerFast`], *optional*):
            The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """
        Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
        of the above two methods for more information.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.

            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        """

        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
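A small usage sketch of the `__call__` branching above (text only, images only, or both). The checkpoint name is the one already used in the modeling docstrings, and the image is assumed to be any `PIL.Image.Image`:

```python
from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
image = Image.new("RGB", (224, 224))  # any PIL image works here

# text only -> tokenizer output (input_ids, attention_mask, ...)
text_only = processor(text=["杰尼龟", "皮卡丘"], padding=True, return_tensors="pt")

# images only -> BatchEncoding wrapping the image processor's pixel_values
image_only = processor(images=image, return_tensors="pt")

# both -> tokenizer output with pixel_values merged in
both = processor(text=["杰尼龟", "皮卡丘"], images=image, padding=True, return_tensors="pt")
print(sorted(both.keys()))  # e.g. ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']
```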
llmeval-env/lib/python3.10/site-packages/transformers/models/cpm/__init__.py
ADDED
@@ -0,0 +1,59 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_cpm"] = ["CpmTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_cpm_fast"] = ["CpmTokenizerFast"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_cpm import CpmTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_cpm_fast import CpmTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
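Because the module object is replaced with a `_LazyModule` above, the tokenizer classes are only imported on first attribute access. A minimal sketch of what that looks like from user code (assuming `sentencepiece` and `jieba` are installed; the vocab path is a placeholder):

```python
from transformers.models.cpm import CpmTokenizer  # resolved lazily via _LazyModule

# "spiece.model" is a placeholder path to a SentencePiece vocabulary file
tokenizer = CpmTokenizer("spiece.model")
```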
llmeval-env/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (907 Bytes).
llmeval-env/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/tokenization_cpm.cpython-310.pyc
ADDED
Binary file (12.7 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/tokenization_cpm_fast.cpython-310.pyc
ADDED
Binary file (9.33 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm.py
ADDED
@@ -0,0 +1,344 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with Jieba segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """
        Construct a CPM tokenizer. Based on [Jieba](https://pypi.org/project/jieba/) and
        [SentencePiece](https://github.com/google/sentencepiece).

        This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should
        refer to this superclass for more information regarding those methods.

        Args:
            vocab_file (`str`):
                [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
                contains the vocabulary necessary to instantiate a tokenizer.
            do_lower_case (`bool`, *optional*, defaults to `True`):
                Whether to lowercase the input when tokenizing.
            remove_space (`bool`, *optional*, defaults to `True`):
                Whether to strip the text when tokenizing (removing excess spaces before and after the string).
            keep_accents (`bool`, *optional*, defaults to `False`):
                Whether to keep accents when tokenizing.
            bos_token (`str`, *optional*, defaults to `"<s>"`):
                The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
                token.

                <Tip>

                When building a sequence using special tokens, this is not the token that is used for the beginning of
                sequence. The token used is the `cls_token`.

                </Tip>

            eos_token (`str`, *optional*, defaults to `"</s>"`):
                The end of sequence token.

                <Tip>

                When building a sequence using special tokens, this is not the token that is used for the end of
                sequence. The token used is the `sep_token`.

                </Tip>

            unk_token (`str`, *optional*, defaults to `"<unk>"`):
                The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
                this token instead.
            sep_token (`str`, *optional*, defaults to `"<sep>"`):
                The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
                for sequence classification or for a text and a question for question answering. It is also used as the
                last token of a sequence built with special tokens.
            pad_token (`str`, *optional*, defaults to `"<pad>"`):
                The token used for padding, for example when batching sequences of different lengths.
            cls_token (`str`, *optional*, defaults to `"<cls>"`):
                The classifier token which is used when doing sequence classification (classification of the whole
                sequence instead of per-token classification). It is the first token of the sequence when built with
                special tokens.
            mask_token (`str`, *optional*, defaults to `"<mask>"`):
                The token used for masking values. This is the token used when training this model with masked language
                modeling. This is the token which the model will try to predict.
            additional_special_tokens (`List[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
                Additional special tokens used by the tokenizer.

        Attributes:
            sp_model (`SentencePieceProcessor`):
                The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
        """
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.get_vocab
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.__getstate__
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.__setstate__
    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.preprocess_text
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer._tokenize
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer._convert_token_to_id
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer._convert_id_to_token
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.convert_tokens_to_string
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.build_inputs_with_special_tokens
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. An XLNet sequence has the following format:

        - single sequence: `X <sep> <cls>`
        - pair of sequences: `A <sep> B <sep> <cls>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.get_special_tokens_mask
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.create_token_type_ids_from_sequences
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
llmeval-env/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm_fast.py
ADDED
@@ -0,0 +1,237 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
"""Tokenization classes."""
|
16 |
+
import os
|
17 |
+
from shutil import copyfile
|
18 |
+
from typing import List, Optional, Tuple
|
19 |
+
|
20 |
+
from ...tokenization_utils_fast import AddedToken, PreTrainedTokenizerFast
|
21 |
+
from ...utils import logging
|
22 |
+
|
23 |
+
|
24 |
+
logger = logging.get_logger(__name__)
|
25 |
+
|
26 |
+
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
|
27 |
+
|
28 |
+
|
29 |
+
class CpmTokenizerFast(PreTrainedTokenizerFast):
|
30 |
+
"""Runs pre-tokenization with Jieba segmentation tool. It is used in CPM models."""
|
31 |
+
|
32 |
+
def __init__(
|
33 |
+
self,
|
34 |
+
vocab_file=None,
|
35 |
+
tokenizer_file=None,
|
36 |
+
do_lower_case=False,
|
37 |
+
remove_space=True,
|
38 |
+
keep_accents=False,
|
39 |
+
bos_token="<s>",
|
40 |
+
eos_token="</s>",
|
41 |
+
unk_token="<unk>",
|
42 |
+
sep_token="<sep>",
|
43 |
+
pad_token="<pad>",
|
44 |
+
cls_token="<cls>",
|
45 |
+
mask_token="<mask>",
|
46 |
+
additional_special_tokens=["<eop>", "<eod>"],
|
47 |
+
**kwargs,
|
48 |
+
):
|
49 |
+
"""
|
50 |
+
Construct a CPM tokenizer. Based on [Jieba](https://pypi.org/project/jieba/) and
|
51 |
+
[SentencePiece](https://github.com/google/sentencepiece).
|
52 |
+
|
53 |
+
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should
|
54 |
+
refer to this superclass for more information regarding those methods.
|
55 |
+
|
56 |
+
Args:
|
57 |
+
vocab_file (`str`):
|
58 |
+
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
|
59 |
+
contains the vocabulary necessary to instantiate a tokenizer.
|
60 |
+
do_lower_case (`bool`, *optional*, defaults to `True`):
|
61 |
+
Whether to lowercase the input when tokenizing.
|
62 |
+
remove_space (`bool`, *optional*, defaults to `True`):
|
63 |
+
Whether to strip the text when tokenizing (removing excess spaces before and after the string).
|
64 |
+
keep_accents (`bool`, *optional*, defaults to `False`):
|
65 |
+
Whether to keep accents when tokenizing.
|
66 |
+
bos_token (`str`, *optional*, defaults to `"<s>"`):
|
67 |
+
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier
|
68 |
+
token.
|
69 |
+
|
70 |
+
<Tip>
|
71 |
+
|
72 |
+
When building a sequence using special tokens, this is not the token that is used for the beginning of
|
73 |
+
sequence. The token used is the `cls_token`.
|
74 |
+
|
75 |
+
</Tip>
|
76 |
+
|
77 |
+
eos_token (`str`, *optional*, defaults to `"</s>"`):
|
78 |
+
The end of sequence token.
|
79 |
+
|
80 |
+
<Tip>
|
81 |
+
|
82 |
+
When building a sequence using special tokens, this is not the token that is used for the end of
|
83 |
+
sequence. The token used is the `sep_token`.
|
84 |
+
|
85 |
+
</Tip>
|
86 |
+
|
87 |
+
unk_token (`str`, *optional*, defaults to `"<unk>"`):
|
88 |
+
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
|
89 |
+
this token instead.
|
90 |
+
sep_token (`str`, *optional*, defaults to `"<sep>"`):
|
91 |
+
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
|
92 |
+
for sequence classification or for a text and a question for question answering. It is also used as the
|
93 |
+
last token of a sequence built with special tokens.
|
94 |
+
pad_token (`str`, *optional*, defaults to `"<pad>"`):
|
95 |
+
The token used for padding, for example when batching sequences of different lengths.
|
96 |
+
cls_token (`str`, *optional*, defaults to `"<cls>"`):
|
97 |
+
The classifier token which is used when doing sequence classification (classification of the whole
|
98 |
+
sequence instead of per-token classification). It is the first token of the sequence when built with
|
99 |
+
special tokens.
|
100 |
+
mask_token (`str`, *optional*, defaults to `"<mask>"`):
|
101 |
+
The token used for masking values. This is the token used when training this model with masked language
|
102 |
+
modeling. This is the token which the model will try to predict.
|
103 |
+
additional_special_tokens (`List[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
|
104 |
+
Additional special tokens used by the tokenizer.
|
105 |
+
|
106 |
+
Attributes:
|
107 |
+
sp_model (`SentencePieceProcessor`):
|
108 |
+
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
|
109 |
+
"""
|
110 |
+
# Mask token behave like a normal word, i.e. include the space before it
|
111 |
+
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
|
112 |
+
|
113 |
+
super().__init__(
|
114 |
+
vocab_file=vocab_file,
|
115 |
+
tokenizer_file=tokenizer_file,
|
116 |
+
do_lower_case=do_lower_case,
|
117 |
+
remove_space=remove_space,
|
118 |
+
keep_accents=keep_accents,
|
119 |
+
bos_token=bos_token,
|
120 |
+
eos_token=eos_token,
|
121 |
+
unk_token=unk_token,
|
122 |
+
sep_token=sep_token,
|
123 |
+
pad_token=pad_token,
|
124 |
+
cls_token=cls_token,
|
125 |
+
mask_token=mask_token,
|
126 |
+
additional_special_tokens=additional_special_tokens,
|
127 |
+
**kwargs,
|
128 |
+
)
|
129 |
+
|
130 |
+
self._pad_token_type_id = 3
|
131 |
+
self.do_lower_case = do_lower_case
|
132 |
+
self.remove_space = remove_space
|
133 |
+
self.keep_accents = keep_accents
|
134 |
+
self.vocab_file = vocab_file
|
135 |
+
|
136 |
+
try:
|
137 |
+
import jieba
|
138 |
+
except ModuleNotFoundError as error:
|
139 |
+
raise error.__class__(
|
140 |
+
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
|
141 |
+
"See https://pypi.org/project/jieba/ for installation."
|
142 |
+
)
|
143 |
+
self.jieba = jieba
|
144 |
+
self.translator = str.maketrans(" \n", "\u2582\u2583")
|
145 |
+
|
146 |
+
@property
|
147 |
+
def can_save_slow_tokenizer(self) -> bool:
|
148 |
+
return os.path.isfile(self.vocab_file) if self.vocab_file else False
|
149 |
+
|
150 |
+
# Copied from transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.build_inputs_with_special_tokens
|
151 |
+
def build_inputs_with_special_tokens(
|
152 |
+
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
|
153 |
+
) -> List[int]:
|
154 |
+
"""
|
155 |
+
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
|
156 |
+
adding special tokens. An XLNet sequence has the following format:
|
157 |
+
|
158 |
+
- single sequence: `X <sep> <cls>`
|
159 |
+
- pair of sequences: `A <sep> B <sep> <cls>`
|
160 |
+
|
161 |
+
Args:
|
162 |
+
token_ids_0 (`List[int]`):
|
163 |
+
List of IDs to which the special tokens will be added.
|
164 |
+
token_ids_1 (`List[int]`, *optional*):
|
165 |
+
Optional second list of IDs for sequence pairs.
|
166 |
+
|
167 |
+
Returns:
|
168 |
+
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
|
169 |
+
"""
|
170 |
+
sep = [self.sep_token_id]
|
171 |
+
cls = [self.cls_token_id]
|
172 |
+
if token_ids_1 is None:
|
173 |
+
return token_ids_0 + sep + cls
|
174 |
+
return token_ids_0 + sep + token_ids_1 + sep + cls
|
175 |
+
|
176 |
+
# Copied from transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.create_token_type_ids_from_sequences
|
177 |
+
def create_token_type_ids_from_sequences(
|
178 |
+
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
|
179 |
+
) -> List[int]:
|
180 |
+
"""
|
181 |
+
Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
|
182 |
+
sequence pair mask has the following format:
|
183 |
+
|
184 |
+
```
|
185 |
+
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
|
186 |
+
| first sequence | second sequence |
|
187 |
+
```
|
188 |
+
|
189 |
+
If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
|
190 |
+
|
191 |
+
Args:
|
192 |
+
token_ids_0 (`List[int]`):
|
193 |
+
List of IDs.
|
194 |
+
token_ids_1 (`List[int]`, *optional*):
|
195 |
+
Optional second list of IDs for sequence pairs.
|
196 |
+
|
197 |
+
Returns:
|
198 |
+
`List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
|
199 |
+
"""
|
200 |
+
sep = [self.sep_token_id]
|
201 |
+
cls_segment_id = [2]
|
202 |
+
|
203 |
+
if token_ids_1 is None:
|
204 |
+
return len(token_ids_0 + sep) * [0] + cls_segment_id
|
205 |
+
return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
|
206 |
+
|
207 |
+
# Copied from transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.save_vocabulary
|
208 |
+
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
|
209 |
+
if not self.can_save_slow_tokenizer:
|
210 |
+
raise ValueError(
|
211 |
+
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
|
212 |
+
"tokenizer."
|
213 |
+
)
|
214 |
+
|
215 |
+
if not os.path.isdir(save_directory):
|
216 |
+
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
|
217 |
+
return
|
218 |
+
out_vocab_file = os.path.join(
|
219 |
+
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
|
220 |
+
)
|
221 |
+
|
222 |
+
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
|
223 |
+
copyfile(self.vocab_file, out_vocab_file)
|
224 |
+
|
225 |
+
return (out_vocab_file,)
|
226 |
+
|
227 |
+
def _batch_encode_plus(self, batch_text_or_text_pairs, *args, **kwargs):
|
228 |
+
batch_text_or_text_pairs = [
|
229 |
+
" ".join([x.translate(self.translator) for x in self.jieba.cut(text, cut_all=False)])
|
230 |
+
for text in batch_text_or_text_pairs
|
231 |
+
]
|
232 |
+
return super()._batch_encode_plus(batch_text_or_text_pairs, *args, **kwargs)
|
233 |
+
|
234 |
+
def _decode(self, *args, **kwargs):
|
235 |
+
text = super()._decode(*args, **kwargs)
|
236 |
+
text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
|
237 |
+
return text
|
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/__init__.py
ADDED
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# flake8: noqa
|
2 |
+
# There's no way to ignore "F401 '...' imported but unused" warnings in this
|
3 |
+
# module, but to preserve other warnings. So, don't check this module at all.
|
4 |
+
|
5 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
6 |
+
#
|
7 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
8 |
+
# you may not use this file except in compliance with the License.
|
9 |
+
# You may obtain a copy of the License at
|
10 |
+
#
|
11 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
12 |
+
#
|
13 |
+
# Unless required by applicable law or agreed to in writing, software
|
14 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
15 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
16 |
+
# See the License for the specific language governing permissions and
|
17 |
+
# limitations under the License.
|
18 |
+
from typing import TYPE_CHECKING
|
19 |
+
|
20 |
+
# rely on isort to merge the imports
|
21 |
+
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
|
22 |
+
|
23 |
+
|
24 |
+
_import_structure = {
|
25 |
+
"configuration_efficientnet": [
|
26 |
+
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
|
27 |
+
"EfficientNetConfig",
|
28 |
+
"EfficientNetOnnxConfig",
|
29 |
+
]
|
30 |
+
}
|
31 |
+
|
32 |
+
try:
|
33 |
+
if not is_vision_available():
|
34 |
+
raise OptionalDependencyNotAvailable()
|
35 |
+
except OptionalDependencyNotAvailable:
|
36 |
+
pass
|
37 |
+
else:
|
38 |
+
_import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
|
39 |
+
|
40 |
+
try:
|
41 |
+
if not is_torch_available():
|
42 |
+
raise OptionalDependencyNotAvailable()
|
43 |
+
except OptionalDependencyNotAvailable:
|
44 |
+
pass
|
45 |
+
else:
|
46 |
+
_import_structure["modeling_efficientnet"] = [
|
47 |
+
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
|
48 |
+
"EfficientNetForImageClassification",
|
49 |
+
"EfficientNetModel",
|
50 |
+
"EfficientNetPreTrainedModel",
|
51 |
+
]
|
52 |
+
|
53 |
+
if TYPE_CHECKING:
|
54 |
+
from .configuration_efficientnet import (
|
55 |
+
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
|
56 |
+
EfficientNetConfig,
|
57 |
+
EfficientNetOnnxConfig,
|
58 |
+
)
|
59 |
+
|
60 |
+
try:
|
61 |
+
if not is_vision_available():
|
62 |
+
raise OptionalDependencyNotAvailable()
|
63 |
+
except OptionalDependencyNotAvailable:
|
64 |
+
pass
|
65 |
+
else:
|
66 |
+
from .image_processing_efficientnet import EfficientNetImageProcessor
|
67 |
+
|
68 |
+
try:
|
69 |
+
if not is_torch_available():
|
70 |
+
raise OptionalDependencyNotAvailable()
|
71 |
+
except OptionalDependencyNotAvailable:
|
72 |
+
pass
|
73 |
+
else:
|
74 |
+
from .modeling_efficientnet import (
|
75 |
+
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
|
76 |
+
EfficientNetForImageClassification,
|
77 |
+
EfficientNetModel,
|
78 |
+
EfficientNetPreTrainedModel,
|
79 |
+
)
|
80 |
+
|
81 |
+
else:
|
82 |
+
import sys
|
83 |
+
|
84 |
+
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
|
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.21 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/configuration_efficientnet.cpython-310.pyc
ADDED
Binary file (7.31 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/convert_efficientnet_to_pytorch.cpython-310.pyc
ADDED
Binary file (9.38 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/image_processing_efficientnet.cpython-310.pyc
ADDED
Binary file (15.6 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/__pycache__/modeling_efficientnet.cpython-310.pyc
ADDED
Binary file (18.8 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/configuration_efficientnet.py
ADDED
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2023 Google Research, Inc. and The HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
""" EfficientNet model configuration"""
|
16 |
+
|
17 |
+
from collections import OrderedDict
|
18 |
+
from typing import List, Mapping
|
19 |
+
|
20 |
+
from packaging import version
|
21 |
+
|
22 |
+
from ...configuration_utils import PretrainedConfig
|
23 |
+
from ...onnx import OnnxConfig
|
24 |
+
from ...utils import logging
|
25 |
+
|
26 |
+
|
27 |
+
logger = logging.get_logger(__name__)
|
28 |
+
|
29 |
+
|
30 |
+
from ..deprecated._archive_maps import EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
|
31 |
+
|
32 |
+
|
33 |
+
class EfficientNetConfig(PretrainedConfig):
|
34 |
+
r"""
|
35 |
+
This is the configuration class to store the configuration of a [`EfficientNetModel`]. It is used to instantiate an
|
36 |
+
EfficientNet model according to the specified arguments, defining the model architecture. Instantiating a
|
37 |
+
configuration with the defaults will yield a similar configuration to that of the EfficientNet
|
38 |
+
[google/efficientnet-b7](https://huggingface.co/google/efficientnet-b7) architecture.
|
39 |
+
|
40 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
41 |
+
documentation from [`PretrainedConfig`] for more information.
|
42 |
+
|
43 |
+
Args:
|
44 |
+
num_channels (`int`, *optional*, defaults to 3):
|
45 |
+
The number of input channels.
|
46 |
+
image_size (`int`, *optional*, defaults to 600):
|
47 |
+
The input image size.
|
48 |
+
width_coefficient (`float`, *optional*, defaults to 2.0):
|
49 |
+
Scaling coefficient for network width at each stage.
|
50 |
+
depth_coefficient (`float`, *optional*, defaults to 3.1):
|
51 |
+
Scaling coefficient for network depth at each stage.
|
52 |
+
depth_divisor `int`, *optional*, defaults to 8):
|
53 |
+
A unit of network width.
|
54 |
+
kernel_sizes (`List[int]`, *optional*, defaults to `[3, 3, 5, 3, 5, 5, 3]`):
|
55 |
+
List of kernel sizes to be used in each block.
|
56 |
+
in_channels (`List[int]`, *optional*, defaults to `[32, 16, 24, 40, 80, 112, 192]`):
|
57 |
+
List of input channel sizes to be used in each block for convolutional layers.
|
58 |
+
out_channels (`List[int]`, *optional*, defaults to `[16, 24, 40, 80, 112, 192, 320]`):
|
59 |
+
List of output channel sizes to be used in each block for convolutional layers.
|
60 |
+
depthwise_padding (`List[int]`, *optional*, defaults to `[]`):
|
61 |
+
List of block indices with square padding.
|
62 |
+
strides (`List[int]`, *optional*, defaults to `[1, 2, 2, 2, 1, 2, 1]`):
|
63 |
+
List of stride sizes to be used in each block for convolutional layers.
|
64 |
+
num_block_repeats (`List[int]`, *optional*, defaults to `[1, 2, 2, 3, 3, 4, 1]`):
|
65 |
+
List of the number of times each block is to repeated.
|
66 |
+
expand_ratios (`List[int]`, *optional*, defaults to `[1, 6, 6, 6, 6, 6, 6]`):
|
67 |
+
List of scaling coefficient of each block.
|
68 |
+
squeeze_expansion_ratio (`float`, *optional*, defaults to 0.25):
|
69 |
+
Squeeze expansion ratio.
|
70 |
+
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
|
71 |
+
The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
|
72 |
+
`"selu", `"gelu_new"`, `"silu"` and `"mish"` are supported.
|
73 |
+
hiddem_dim (`int`, *optional*, defaults to 1280):
|
74 |
+
The hidden dimension of the layer before the classification head.
|
75 |
+
pooling_type (`str` or `function`, *optional*, defaults to `"mean"`):
|
76 |
+
Type of final pooling to be applied before the dense classification head. Available options are [`"mean"`,
|
77 |
+
`"max"`]
|
78 |
+
initializer_range (`float`, *optional*, defaults to 0.02):
|
79 |
+
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
80 |
+
batch_norm_eps (`float`, *optional*, defaults to 1e-3):
|
81 |
+
The epsilon used by the batch normalization layers.
|
82 |
+
batch_norm_momentum (`float`, *optional*, defaults to 0.99):
|
83 |
+
The momentum used by the batch normalization layers.
|
84 |
+
dropout_rate (`float`, *optional*, defaults to 0.5):
|
85 |
+
The dropout rate to be applied before final classifier layer.
|
86 |
+
drop_connect_rate (`float`, *optional*, defaults to 0.2):
|
87 |
+
The drop rate for skip connections.
|
88 |
+
|
89 |
+
Example:
|
90 |
+
```python
|
91 |
+
>>> from transformers import EfficientNetConfig, EfficientNetModel
|
92 |
+
|
93 |
+
>>> # Initializing a EfficientNet efficientnet-b7 style configuration
|
94 |
+
>>> configuration = EfficientNetConfig()
|
95 |
+
|
96 |
+
>>> # Initializing a model (with random weights) from the efficientnet-b7 style configuration
|
97 |
+
>>> model = EfficientNetModel(configuration)
|
98 |
+
|
99 |
+
>>> # Accessing the model configuration
|
100 |
+
>>> configuration = model.config
|
101 |
+
```"""
|
102 |
+
|
103 |
+
model_type = "efficientnet"
|
104 |
+
|
105 |
+
def __init__(
|
106 |
+
self,
|
107 |
+
num_channels: int = 3,
|
108 |
+
image_size: int = 600,
|
109 |
+
width_coefficient: float = 2.0,
|
110 |
+
depth_coefficient: float = 3.1,
|
111 |
+
depth_divisor: int = 8,
|
112 |
+
kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
|
113 |
+
in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
|
114 |
+
out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
|
115 |
+
depthwise_padding: List[int] = [],
|
116 |
+
strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
|
117 |
+
num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
|
118 |
+
expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
|
119 |
+
squeeze_expansion_ratio: float = 0.25,
|
120 |
+
hidden_act: str = "swish",
|
121 |
+
hidden_dim: int = 2560,
|
122 |
+
pooling_type: str = "mean",
|
123 |
+
initializer_range: float = 0.02,
|
124 |
+
batch_norm_eps: float = 0.001,
|
125 |
+
batch_norm_momentum: float = 0.99,
|
126 |
+
dropout_rate: float = 0.5,
|
127 |
+
drop_connect_rate: float = 0.2,
|
128 |
+
**kwargs,
|
129 |
+
):
|
130 |
+
super().__init__(**kwargs)
|
131 |
+
|
132 |
+
self.num_channels = num_channels
|
133 |
+
self.image_size = image_size
|
134 |
+
self.width_coefficient = width_coefficient
|
135 |
+
self.depth_coefficient = depth_coefficient
|
136 |
+
self.depth_divisor = depth_divisor
|
137 |
+
self.kernel_sizes = kernel_sizes
|
138 |
+
self.in_channels = in_channels
|
139 |
+
self.out_channels = out_channels
|
140 |
+
self.depthwise_padding = depthwise_padding
|
141 |
+
self.strides = strides
|
142 |
+
self.num_block_repeats = num_block_repeats
|
143 |
+
self.expand_ratios = expand_ratios
|
144 |
+
self.squeeze_expansion_ratio = squeeze_expansion_ratio
|
145 |
+
self.hidden_act = hidden_act
|
146 |
+
self.hidden_dim = hidden_dim
|
147 |
+
self.pooling_type = pooling_type
|
148 |
+
self.initializer_range = initializer_range
|
149 |
+
self.batch_norm_eps = batch_norm_eps
|
150 |
+
self.batch_norm_momentum = batch_norm_momentum
|
151 |
+
self.dropout_rate = dropout_rate
|
152 |
+
self.drop_connect_rate = drop_connect_rate
|
153 |
+
self.num_hidden_layers = sum(num_block_repeats) * 4
|
154 |
+
|
155 |
+
|
156 |
+
class EfficientNetOnnxConfig(OnnxConfig):
|
157 |
+
torch_onnx_minimum_version = version.parse("1.11")
|
158 |
+
|
159 |
+
@property
|
160 |
+
def inputs(self) -> Mapping[str, Mapping[int, str]]:
|
161 |
+
return OrderedDict(
|
162 |
+
[
|
163 |
+
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
|
164 |
+
]
|
165 |
+
)
|
166 |
+
|
167 |
+
@property
|
168 |
+
def atol_for_validation(self) -> float:
|
169 |
+
return 1e-5
|
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/convert_efficientnet_to_pytorch.py
ADDED
@@ -0,0 +1,339 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2023 The HuggingFace Inc. team.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
"""Convert EfficientNet checkpoints from the original repository.
|
16 |
+
|
17 |
+
URL: https://github.com/keras-team/keras/blob/v2.11.0/keras/applications/efficientnet.py"""
|
18 |
+
|
19 |
+
import argparse
|
20 |
+
import json
|
21 |
+
import os
|
22 |
+
|
23 |
+
import numpy as np
|
24 |
+
import PIL
|
25 |
+
import requests
|
26 |
+
import tensorflow.keras.applications.efficientnet as efficientnet
|
27 |
+
import torch
|
28 |
+
from huggingface_hub import hf_hub_download
|
29 |
+
from PIL import Image
|
30 |
+
from tensorflow.keras.preprocessing import image
|
31 |
+
|
32 |
+
from transformers import (
|
33 |
+
EfficientNetConfig,
|
34 |
+
EfficientNetForImageClassification,
|
35 |
+
EfficientNetImageProcessor,
|
36 |
+
)
|
37 |
+
from transformers.utils import logging
|
38 |
+
|
39 |
+
|
40 |
+
logging.set_verbosity_info()
|
41 |
+
logger = logging.get_logger(__name__)
|
42 |
+
|
43 |
+
model_classes = {
|
44 |
+
"b0": efficientnet.EfficientNetB0,
|
45 |
+
"b1": efficientnet.EfficientNetB1,
|
46 |
+
"b2": efficientnet.EfficientNetB2,
|
47 |
+
"b3": efficientnet.EfficientNetB3,
|
48 |
+
"b4": efficientnet.EfficientNetB4,
|
49 |
+
"b5": efficientnet.EfficientNetB5,
|
50 |
+
"b6": efficientnet.EfficientNetB6,
|
51 |
+
"b7": efficientnet.EfficientNetB7,
|
52 |
+
}
|
53 |
+
|
54 |
+
CONFIG_MAP = {
|
55 |
+
"b0": {
|
56 |
+
"hidden_dim": 1280,
|
57 |
+
"width_coef": 1.0,
|
58 |
+
"depth_coef": 1.0,
|
59 |
+
"image_size": 224,
|
60 |
+
"dropout_rate": 0.2,
|
61 |
+
"dw_padding": [],
|
62 |
+
},
|
63 |
+
"b1": {
|
64 |
+
"hidden_dim": 1280,
|
65 |
+
"width_coef": 1.0,
|
66 |
+
"depth_coef": 1.1,
|
67 |
+
"image_size": 240,
|
68 |
+
"dropout_rate": 0.2,
|
69 |
+
"dw_padding": [16],
|
70 |
+
},
|
71 |
+
"b2": {
|
72 |
+
"hidden_dim": 1408,
|
73 |
+
"width_coef": 1.1,
|
74 |
+
"depth_coef": 1.2,
|
75 |
+
"image_size": 260,
|
76 |
+
"dropout_rate": 0.3,
|
77 |
+
"dw_padding": [5, 8, 16],
|
78 |
+
},
|
79 |
+
"b3": {
|
80 |
+
"hidden_dim": 1536,
|
81 |
+
"width_coef": 1.2,
|
82 |
+
"depth_coef": 1.4,
|
83 |
+
"image_size": 300,
|
84 |
+
"dropout_rate": 0.3,
|
85 |
+
"dw_padding": [5, 18],
|
86 |
+
},
|
87 |
+
"b4": {
|
88 |
+
"hidden_dim": 1792,
|
89 |
+
"width_coef": 1.4,
|
90 |
+
"depth_coef": 1.8,
|
91 |
+
"image_size": 380,
|
92 |
+
"dropout_rate": 0.4,
|
93 |
+
"dw_padding": [6],
|
94 |
+
},
|
95 |
+
"b5": {
|
96 |
+
"hidden_dim": 2048,
|
97 |
+
"width_coef": 1.6,
|
98 |
+
"depth_coef": 2.2,
|
99 |
+
"image_size": 456,
|
100 |
+
"dropout_rate": 0.4,
|
101 |
+
"dw_padding": [13, 27],
|
102 |
+
},
|
103 |
+
"b6": {
|
104 |
+
"hidden_dim": 2304,
|
105 |
+
"width_coef": 1.8,
|
106 |
+
"depth_coef": 2.6,
|
107 |
+
"image_size": 528,
|
108 |
+
"dropout_rate": 0.5,
|
109 |
+
"dw_padding": [31],
|
110 |
+
},
|
111 |
+
"b7": {
|
112 |
+
"hidden_dim": 2560,
|
113 |
+
"width_coef": 2.0,
|
114 |
+
"depth_coef": 3.1,
|
115 |
+
"image_size": 600,
|
116 |
+
"dropout_rate": 0.5,
|
117 |
+
"dw_padding": [18],
|
118 |
+
},
|
119 |
+
}
|
120 |
+
|
121 |
+
|
122 |
+
def get_efficientnet_config(model_name):
|
123 |
+
config = EfficientNetConfig()
|
124 |
+
config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
|
125 |
+
config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
|
126 |
+
config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
|
127 |
+
config.image_size = CONFIG_MAP[model_name]["image_size"]
|
128 |
+
config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
|
129 |
+
config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
|
130 |
+
|
131 |
+
repo_id = "huggingface/label-files"
|
132 |
+
filename = "imagenet-1k-id2label.json"
|
133 |
+
config.num_labels = 1000
|
134 |
+
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
|
135 |
+
id2label = {int(k): v for k, v in id2label.items()}
|
136 |
+
|
137 |
+
config.id2label = id2label
|
138 |
+
config.label2id = {v: k for k, v in id2label.items()}
|
139 |
+
return config
|
140 |
+
|
141 |
+
|
142 |
+
# We will verify our results on an image of cute cats
|
143 |
+
def prepare_img():
|
144 |
+
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
145 |
+
im = Image.open(requests.get(url, stream=True).raw)
|
146 |
+
return im
|
147 |
+
|
148 |
+
|
149 |
+
def convert_image_processor(model_name):
|
150 |
+
size = CONFIG_MAP[model_name]["image_size"]
|
151 |
+
preprocessor = EfficientNetImageProcessor(
|
152 |
+
size={"height": size, "width": size},
|
153 |
+
image_mean=[0.485, 0.456, 0.406],
|
154 |
+
image_std=[0.47853944, 0.4732864, 0.47434163],
|
155 |
+
do_center_crop=False,
|
156 |
+
)
|
157 |
+
return preprocessor
|
158 |
+
|
159 |
+
|
160 |
+
# here we list all keys to be renamed (original name on the left, our name on the right)
|
161 |
+
def rename_keys(original_param_names):
|
162 |
+
block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
|
163 |
+
block_names = sorted(set(block_names))
|
164 |
+
num_blocks = len(block_names)
|
165 |
+
block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
|
166 |
+
|
167 |
+
rename_keys = []
|
168 |
+
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
|
169 |
+
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
|
170 |
+
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
|
171 |
+
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
|
172 |
+
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))
|
173 |
+
|
174 |
+
for b in block_names:
|
175 |
+
hf_b = block_name_mapping[b]
|
176 |
+
rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
|
177 |
+
rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
|
178 |
+
rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
|
179 |
+
rename_keys.append(
|
180 |
+
(f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean")
|
181 |
+
)
|
182 |
+
rename_keys.append(
|
183 |
+
(f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var")
|
184 |
+
)
|
185 |
+
rename_keys.append(
|
186 |
+
(f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight")
|
187 |
+
)
|
188 |
+
rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
|
189 |
+
rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
|
190 |
+
rename_keys.append(
|
191 |
+
(f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean")
|
192 |
+
)
|
193 |
+
rename_keys.append(
|
194 |
+
(f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var")
|
195 |
+
)
|
196 |
+
|
197 |
+
rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
|
198 |
+
rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
|
199 |
+
rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
|
200 |
+
rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
|
201 |
+
rename_keys.append(
|
202 |
+
(f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight")
|
203 |
+
)
|
204 |
+
rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
|
205 |
+
rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
|
206 |
+
rename_keys.append(
|
207 |
+
(f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean")
|
208 |
+
)
|
209 |
+
rename_keys.append(
|
210 |
+
(f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var")
|
211 |
+
)
|
212 |
+
|
213 |
+
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
|
214 |
+
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
|
215 |
+
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
|
216 |
+
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
|
217 |
+
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))
|
218 |
+
|
219 |
+
key_mapping = {}
|
220 |
+
for item in rename_keys:
|
221 |
+
if item[0] in original_param_names:
|
222 |
+
key_mapping[item[0]] = "efficientnet." + item[1]
|
223 |
+
|
224 |
+
key_mapping["predictions/kernel:0"] = "classifier.weight"
|
225 |
+
key_mapping["predictions/bias:0"] = "classifier.bias"
|
226 |
+
return key_mapping
|
227 |
+
|
228 |
+
|
229 |
+
def replace_params(hf_params, tf_params, key_mapping):
|
230 |
+
for key, value in tf_params.items():
|
231 |
+
if "normalization" in key:
|
232 |
+
continue
|
233 |
+
|
234 |
+
hf_key = key_mapping[key]
|
235 |
+
if "_conv" in key and "kernel" in key:
|
236 |
+
new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
|
237 |
+
elif "depthwise_kernel" in key:
|
238 |
+
new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
|
239 |
+
elif "kernel" in key:
|
240 |
+
new_hf_value = torch.from_numpy(np.transpose(value))
|
241 |
+
else:
|
242 |
+
new_hf_value = torch.from_numpy(value)
|
243 |
+
|
244 |
+
# Replace HF parameters with original TF model parameters
|
245 |
+
assert hf_params[hf_key].shape == new_hf_value.shape
|
246 |
+
hf_params[hf_key].copy_(new_hf_value)
|
247 |
+
|
248 |
+
|
249 |
+
@torch.no_grad()
|
250 |
+
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
|
251 |
+
"""
|
252 |
+
Copy/paste/tweak model's weights to our EfficientNet structure.
|
253 |
+
"""
|
254 |
+
# Load original model
|
255 |
+
original_model = model_classes[model_name](
|
256 |
+
include_top=True,
|
257 |
+
weights="imagenet",
|
258 |
+
input_tensor=None,
|
259 |
+
input_shape=None,
|
260 |
+
pooling=None,
|
261 |
+
classes=1000,
|
262 |
+
classifier_activation="softmax",
|
263 |
+
)
|
264 |
+
|
265 |
+
tf_params = original_model.trainable_variables
|
266 |
+
tf_non_train_params = original_model.non_trainable_variables
|
267 |
+
tf_params = {param.name: param.numpy() for param in tf_params}
|
268 |
+
for param in tf_non_train_params:
|
269 |
+
tf_params[param.name] = param.numpy()
|
270 |
+
tf_param_names = list(tf_params.keys())
|
271 |
+
|
272 |
+
# Load HuggingFace model
|
273 |
+
config = get_efficientnet_config(model_name)
|
274 |
+
hf_model = EfficientNetForImageClassification(config).eval()
|
275 |
+
hf_params = hf_model.state_dict()
|
276 |
+
|
277 |
+
# Create src-to-dst parameter name mapping dictionary
|
278 |
+
print("Converting parameters...")
|
279 |
+
key_mapping = rename_keys(tf_param_names)
|
280 |
+
replace_params(hf_params, tf_params, key_mapping)
|
281 |
+
|
282 |
+
# Initialize preprocessor and preprocess input image
|
283 |
+
preprocessor = convert_image_processor(model_name)
|
284 |
+
inputs = preprocessor(images=prepare_img(), return_tensors="pt")
|
285 |
+
|
286 |
+
# HF model inference
|
287 |
+
hf_model.eval()
|
288 |
+
with torch.no_grad():
|
289 |
+
outputs = hf_model(**inputs)
|
290 |
+
hf_logits = outputs.logits.detach().numpy()
|
291 |
+
|
292 |
+
# Original model inference
|
293 |
+
original_model.trainable = False
|
294 |
+
image_size = CONFIG_MAP[model_name]["image_size"]
|
295 |
+
img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
|
296 |
+
x = image.img_to_array(img)
|
297 |
+
x = np.expand_dims(x, axis=0)
|
298 |
+
original_logits = original_model.predict(x)
|
299 |
+
|
300 |
+
# Check whether original and HF model outputs match -> np.allclose
|
301 |
+
assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
|
302 |
+
print("Model outputs match!")
|
303 |
+
|
304 |
+
if save_model:
|
305 |
+
# Create folder to save model
|
306 |
+
if not os.path.isdir(pytorch_dump_folder_path):
|
307 |
+
os.mkdir(pytorch_dump_folder_path)
|
308 |
+
# Save converted model and image processor
|
309 |
+
hf_model.save_pretrained(pytorch_dump_folder_path)
|
310 |
+
preprocessor.save_pretrained(pytorch_dump_folder_path)
|
311 |
+
|
312 |
+
if push_to_hub:
|
313 |
+
# Push model and image processor to hub
|
314 |
+
print(f"Pushing converted {model_name} to the hub...")
|
315 |
+
model_name = f"efficientnet-{model_name}"
|
316 |
+
preprocessor.push_to_hub(model_name)
|
317 |
+
hf_model.push_to_hub(model_name)
|
318 |
+
|
319 |
+
|
320 |
+
if __name__ == "__main__":
|
321 |
+
parser = argparse.ArgumentParser()
|
322 |
+
# Required parameters
|
323 |
+
parser.add_argument(
|
324 |
+
"--model_name",
|
325 |
+
default="b0",
|
326 |
+
type=str,
|
327 |
+
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
|
328 |
+
)
|
329 |
+
parser.add_argument(
|
330 |
+
"--pytorch_dump_folder_path",
|
331 |
+
default="hf_model",
|
332 |
+
type=str,
|
333 |
+
help="Path to the output PyTorch model directory.",
|
334 |
+
)
|
335 |
+
parser.add_argument("--save_model", action="store_true", help="Save model to local")
|
336 |
+
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
|
337 |
+
|
338 |
+
args = parser.parse_args()
|
339 |
+
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
|
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/image_processing_efficientnet.py
ADDED
@@ -0,0 +1,387 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
"""Image processor class for EfficientNet."""
|
16 |
+
|
17 |
+
from typing import Dict, List, Optional, Union
|
18 |
+
|
19 |
+
import numpy as np
|
20 |
+
|
21 |
+
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
|
22 |
+
from ...image_transforms import rescale, resize, to_channel_dimension_format
|
23 |
+
from ...image_utils import (
|
24 |
+
IMAGENET_STANDARD_MEAN,
|
25 |
+
IMAGENET_STANDARD_STD,
|
26 |
+
ChannelDimension,
|
27 |
+
ImageInput,
|
28 |
+
PILImageResampling,
|
29 |
+
infer_channel_dimension_format,
|
30 |
+
is_scaled_image,
|
31 |
+
make_list_of_images,
|
32 |
+
to_numpy_array,
|
33 |
+
valid_images,
|
34 |
+
validate_kwargs,
|
35 |
+
validate_preprocess_arguments,
|
36 |
+
)
|
37 |
+
from ...utils import TensorType, is_vision_available, logging
|
38 |
+
|
39 |
+
|
40 |
+
if is_vision_available():
|
41 |
+
import PIL
|
42 |
+
|
43 |
+
|
44 |
+
logger = logging.get_logger(__name__)
|
45 |
+
|
46 |
+
|
47 |
+
class EfficientNetImageProcessor(BaseImageProcessor):
|
48 |
+
r"""
|
49 |
+
Constructs a EfficientNet image processor.
|
50 |
+
|
51 |
+
Args:
|
52 |
+
do_resize (`bool`, *optional*, defaults to `True`):
|
53 |
+
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
|
54 |
+
`do_resize` in `preprocess`.
|
55 |
+
size (`Dict[str, int]` *optional*, defaults to `{"height": 346, "width": 346}`):
|
56 |
+
Size of the image after `resize`. Can be overridden by `size` in `preprocess`.
|
57 |
+
resample (`PILImageResampling` filter, *optional*, defaults to 0):
|
58 |
+
Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
|
59 |
+
do_center_crop (`bool`, *optional*, defaults to `False`):
|
60 |
+
Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
|
61 |
+
is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in `preprocess`.
|
62 |
+
crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 289, "width": 289}`):
|
63 |
+
Desired output size when applying center-cropping. Can be overridden by `crop_size` in `preprocess`.
|
64 |
+
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
|
65 |
+
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
|
66 |
+
`preprocess` method.
|
67 |
+
rescale_offset (`bool`, *optional*, defaults to `False`):
|
68 |
+
Whether to rescale the image between [-scale_range, scale_range] instead of [0, scale_range]. Can be
|
69 |
+
overridden by the `rescale_factor` parameter in the `preprocess` method.
|
70 |
+
do_rescale (`bool`, *optional*, defaults to `True`):
|
71 |
+
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
|
72 |
+
parameter in the `preprocess` method.
|
73 |
+
do_normalize (`bool`, *optional*, defaults to `True`):
|
74 |
+
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
|
75 |
+
method.
|
76 |
+
image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
|
77 |
+
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
|
78 |
+
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
|
79 |
+
image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
|
80 |
+
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
|
81 |
+
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
|
82 |
+
include_top (`bool`, *optional*, defaults to `True`):
|
83 |
+
Whether to rescale the image again. Should be set to True if the inputs are used for image classification.
|
84 |
+
"""
|
85 |
+
|
86 |
+
model_input_names = ["pixel_values"]
|
87 |
+
|
88 |
+
def __init__(
|
89 |
+
self,
|
90 |
+
do_resize: bool = True,
|
91 |
+
size: Dict[str, int] = None,
|
92 |
+
resample: PILImageResampling = PIL.Image.NEAREST,
|
93 |
+
do_center_crop: bool = False,
|
94 |
+
crop_size: Dict[str, int] = None,
|
95 |
+
rescale_factor: Union[int, float] = 1 / 255,
|
96 |
+
rescale_offset: bool = False,
|
97 |
+
do_rescale: bool = True,
|
98 |
+
do_normalize: bool = True,
|
99 |
+
image_mean: Optional[Union[float, List[float]]] = None,
|
100 |
+
image_std: Optional[Union[float, List[float]]] = None,
|
101 |
+
include_top: bool = True,
|
102 |
+
**kwargs,
|
103 |
+
) -> None:
|
104 |
+
super().__init__(**kwargs)
|
105 |
+
size = size if size is not None else {"height": 346, "width": 346}
|
106 |
+
size = get_size_dict(size)
|
107 |
+
crop_size = crop_size if crop_size is not None else {"height": 289, "width": 289}
|
108 |
+
crop_size = get_size_dict(crop_size, param_name="crop_size")
|
109 |
+
|
110 |
+
self.do_resize = do_resize
|
111 |
+
self.size = size
|
112 |
+
self.resample = resample
|
113 |
+
self.do_center_crop = do_center_crop
|
114 |
+
self.crop_size = crop_size
|
115 |
+
self.do_rescale = do_rescale
|
116 |
+
self.rescale_factor = rescale_factor
|
117 |
+
self.rescale_offset = rescale_offset
|
118 |
+
self.do_normalize = do_normalize
|
119 |
+
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
|
120 |
+
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
|
121 |
+
self.include_top = include_top
|
122 |
+
self._valid_processor_keys = [
|
123 |
+
"images",
|
124 |
+
"do_resize",
|
125 |
+
"size",
|
126 |
+
"resample",
|
127 |
+
"do_center_crop",
|
128 |
+
"crop_size",
|
129 |
+
"do_rescale",
|
130 |
+
"rescale_factor",
|
131 |
+
"rescale_offset",
|
132 |
+
"do_normalize",
|
133 |
+
"image_mean",
|
134 |
+
"image_std",
|
135 |
+
"include_top",
|
136 |
+
"return_tensors",
|
137 |
+
"data_format",
|
138 |
+
"input_data_format",
|
139 |
+
]
|
140 |
+
|
141 |
+
# Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.NEAREST
|
142 |
+
def resize(
|
143 |
+
self,
|
144 |
+
image: np.ndarray,
|
145 |
+
size: Dict[str, int],
|
146 |
+
resample: PILImageResampling = PILImageResampling.NEAREST,
|
147 |
+
data_format: Optional[Union[str, ChannelDimension]] = None,
|
148 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
149 |
+
**kwargs,
|
150 |
+
) -> np.ndarray:
|
151 |
+
"""
|
152 |
+
Resize an image to `(size["height"], size["width"])`.
|
153 |
+
|
154 |
+
Args:
|
155 |
+
image (`np.ndarray`):
|
156 |
+
Image to resize.
|
157 |
+
size (`Dict[str, int]`):
|
158 |
+
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
|
159 |
+
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.NEAREST`):
|
160 |
+
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.NEAREST`.
|
161 |
+
data_format (`ChannelDimension` or `str`, *optional*):
|
162 |
+
The channel dimension format for the output image. If unset, the channel dimension format of the input
|
163 |
+
image is used. Can be one of:
|
164 |
+
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
165 |
+
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
166 |
+
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
|
167 |
+
input_data_format (`ChannelDimension` or `str`, *optional*):
|
168 |
+
The channel dimension format for the input image. If unset, the channel dimension format is inferred
|
169 |
+
from the input image. Can be one of:
|
170 |
+
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
171 |
+
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
172 |
+
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
|
173 |
+
|
174 |
+
Returns:
|
175 |
+
`np.ndarray`: The resized image.
|
176 |
+
"""
|
177 |
+
size = get_size_dict(size)
|
178 |
+
if "height" not in size or "width" not in size:
|
179 |
+
raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
|
180 |
+
output_size = (size["height"], size["width"])
|
181 |
+
return resize(
|
182 |
+
image,
|
183 |
+
size=output_size,
|
184 |
+
resample=resample,
|
185 |
+
data_format=data_format,
|
186 |
+
input_data_format=input_data_format,
|
187 |
+
**kwargs,
|
188 |
+
)
|
189 |
+
|
190 |
+
def rescale(
|
191 |
+
self,
|
192 |
+
image: np.ndarray,
|
193 |
+
scale: Union[int, float],
|
194 |
+
offset: bool = True,
|
195 |
+
data_format: Optional[Union[str, ChannelDimension]] = None,
|
196 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
197 |
+
**kwargs,
|
198 |
+
):
|
199 |
+
"""
|
200 |
+
Rescale an image by a scale factor.
|
201 |
+
|
202 |
+
If `offset` is `True`, the image has its values rescaled by `scale` and then offset by 1. If `scale` is
|
203 |
+
1/127.5, the image is rescaled between [-1, 1].
|
204 |
+
image = image * scale - 1
|
205 |
+
|
206 |
+
If `offset` is `False`, and `scale` is 1/255, the image is rescaled between [0, 1].
|
207 |
+
image = image * scale
|
208 |
+
|
209 |
+
Args:
|
210 |
+
image (`np.ndarray`):
|
211 |
+
Image to rescale.
|
212 |
+
scale (`int` or `float`):
|
213 |
+
Scale to apply to the image.
|
214 |
+
offset (`bool`, *optional*):
|
215 |
+
Whether to scale the image in both negative and positive directions.
|
216 |
+
data_format (`str` or `ChannelDimension`, *optional*):
|
217 |
+
The channel dimension format of the image. If not provided, it will be the same as the input image.
|
218 |
+
input_data_format (`ChannelDimension` or `str`, *optional*):
|
219 |
+
The channel dimension format of the input image. If not provided, it will be inferred.
|
220 |
+
"""
|
221 |
+
rescaled_image = rescale(
|
222 |
+
image, scale=scale, data_format=data_format, input_data_format=input_data_format, **kwargs
|
223 |
+
)
|
224 |
+
|
225 |
+
if offset:
|
226 |
+
rescaled_image = rescaled_image - 1
|
227 |
+
|
228 |
+
return rescaled_image
|
229 |
+
|
230 |
+
def preprocess(
|
231 |
+
self,
|
232 |
+
images: ImageInput,
|
233 |
+
do_resize: bool = None,
|
234 |
+
size: Dict[str, int] = None,
|
235 |
+
resample=None,
|
236 |
+
do_center_crop: bool = None,
|
237 |
+
crop_size: Dict[str, int] = None,
|
238 |
+
do_rescale: bool = None,
|
239 |
+
rescale_factor: float = None,
|
240 |
+
rescale_offset: bool = None,
|
241 |
+
do_normalize: bool = None,
|
242 |
+
image_mean: Optional[Union[float, List[float]]] = None,
|
243 |
+
image_std: Optional[Union[float, List[float]]] = None,
|
244 |
+
include_top: bool = None,
|
245 |
+
return_tensors: Optional[Union[str, TensorType]] = None,
|
246 |
+
data_format: ChannelDimension = ChannelDimension.FIRST,
|
247 |
+
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
248 |
+
**kwargs,
|
249 |
+
) -> PIL.Image.Image:
|
250 |
+
"""
|
251 |
+
Preprocess an image or batch of images.
|
252 |
+
|
253 |
+
Args:
|
254 |
+
images (`ImageInput`):
|
255 |
+
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
|
256 |
+
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
|
257 |
+
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
|
258 |
+
Whether to resize the image.
|
259 |
+
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
|
260 |
+
Size of the image after `resize`.
|
261 |
+
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
|
262 |
+
PILImageResampling filter to use if resizing the image Only has an effect if `do_resize` is set to
|
263 |
+
`True`.
|
264 |
+
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
|
265 |
+
Whether to center crop the image.
|
266 |
+
crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
|
267 |
+
Size of the image after center crop. If one edge the image is smaller than `crop_size`, it will be
|
268 |
+
padded with zeros and then cropped
|
269 |
+
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
|
270 |
+
Whether to rescale the image values between [0 - 1].
|
271 |
+
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
|
272 |
+
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
|
273 |
+
rescale_offset (`bool`, *optional*, defaults to `self.rescale_offset`):
|
274 |
+
Whether to rescale the image between [-scale_range, scale_range] instead of [0, scale_range].
|
275 |
+
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
|
276 |
+
Whether to normalize the image.
|
277 |
+
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
|
278 |
+
Image mean.
|
279 |
+
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
|
280 |
+
Image standard deviation.
|
281 |
+
include_top (`bool`, *optional*, defaults to `self.include_top`):
|
282 |
+
Rescales the image again for image classification if set to True.
|
283 |
+
return_tensors (`str` or `TensorType`, *optional*):
|
284 |
+
The type of tensors to return. Can be one of:
|
285 |
+
- `None`: Return a list of `np.ndarray`.
|
286 |
+
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
|
287 |
+
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
|
288 |
+
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
|
289 |
+
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
|
290 |
+
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
|
291 |
+
The channel dimension format for the output image. Can be one of:
|
292 |
+
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
293 |
+
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
294 |
+
input_data_format (`ChannelDimension` or `str`, *optional*):
|
295 |
+
The channel dimension format for the input image. If unset, the channel dimension format is inferred
|
296 |
+
from the input image. Can be one of:
|
297 |
+
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
298 |
+
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
299 |
+
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
|
300 |
+
"""
|
301 |
+
do_resize = do_resize if do_resize is not None else self.do_resize
|
302 |
+
resample = resample if resample is not None else self.resample
|
303 |
+
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
|
304 |
+
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
|
305 |
+
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
|
306 |
+
rescale_offset = rescale_offset if rescale_offset is not None else self.rescale_offset
|
307 |
+
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
|
308 |
+
image_mean = image_mean if image_mean is not None else self.image_mean
|
309 |
+
image_std = image_std if image_std is not None else self.image_std
|
310 |
+
include_top = include_top if include_top is not None else self.include_top
|
311 |
+
|
312 |
+
size = size if size is not None else self.size
|
313 |
+
size = get_size_dict(size)
|
314 |
+
crop_size = crop_size if crop_size is not None else self.crop_size
|
315 |
+
crop_size = get_size_dict(crop_size, param_name="crop_size")
|
316 |
+
|
317 |
+
images = make_list_of_images(images)
|
318 |
+
|
319 |
+
validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
|
320 |
+
|
321 |
+
if not valid_images(images):
|
322 |
+
raise ValueError(
|
323 |
+
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
|
324 |
+
"torch.Tensor, tf.Tensor or jax.ndarray."
|
325 |
+
)
|
326 |
+
validate_preprocess_arguments(
|
327 |
+
do_rescale=do_rescale,
|
328 |
+
rescale_factor=rescale_factor,
|
329 |
+
do_normalize=do_normalize,
|
330 |
+
image_mean=image_mean,
|
331 |
+
image_std=image_std,
|
332 |
+
do_center_crop=do_center_crop,
|
333 |
+
crop_size=crop_size,
|
334 |
+
do_resize=do_resize,
|
335 |
+
size=size,
|
336 |
+
resample=resample,
|
337 |
+
)
|
338 |
+
# All transformations expect numpy arrays.
|
339 |
+
images = [to_numpy_array(image) for image in images]
|
340 |
+
|
341 |
+
if is_scaled_image(images[0]) and do_rescale:
|
342 |
+
logger.warning_once(
|
343 |
+
"It looks like you are trying to rescale already rescaled images. If the input"
|
344 |
+
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
|
345 |
+
)
|
346 |
+
|
347 |
+
if input_data_format is None:
|
348 |
+
# We assume that all images have the same channel dimension format.
|
349 |
+
input_data_format = infer_channel_dimension_format(images[0])
|
350 |
+
|
351 |
+
if do_resize:
|
352 |
+
images = [
|
353 |
+
self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
|
354 |
+
for image in images
|
355 |
+
]
|
356 |
+
|
357 |
+
if do_center_crop:
|
358 |
+
images = [
|
359 |
+
self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
|
360 |
+
]
|
361 |
+
|
362 |
+
if do_rescale:
|
363 |
+
images = [
|
364 |
+
self.rescale(
|
365 |
+
image=image, scale=rescale_factor, offset=rescale_offset, input_data_format=input_data_format
|
366 |
+
)
|
367 |
+
for image in images
|
368 |
+
]
|
369 |
+
|
370 |
+
if do_normalize:
|
371 |
+
images = [
|
372 |
+
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
|
373 |
+
for image in images
|
374 |
+
]
|
375 |
+
|
376 |
+
if include_top:
|
377 |
+
images = [
|
378 |
+
self.normalize(image=image, mean=0, std=image_std, input_data_format=input_data_format)
|
379 |
+
for image in images
|
380 |
+
]
|
381 |
+
|
382 |
+
images = [
|
383 |
+
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
|
384 |
+
]
|
385 |
+
|
386 |
+
data = {"pixel_values": images}
|
387 |
+
return BatchFeature(data=data, tensor_type=return_tensors)
|
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientnet/modeling_efficientnet.py
ADDED
@@ -0,0 +1,648 @@
# coding=utf-8
# Copyright 2023 Google Research, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch EfficientNet model."""


import math
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
)
from .configuration_efficientnet import EfficientNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "EfficientNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/efficientnet-b7"
_EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/efficientnet-b7"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


from ..deprecated._archive_maps import EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402


EFFICIENTNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`EfficientNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

EFFICIENTNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`AutoImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


def round_filters(config: EfficientNetConfig, num_channels: int):
    r"""
    Round number of filters based on depth multiplier.
    """
    divisor = config.depth_divisor
    num_channels *= config.width_coefficient
    new_dim = max(divisor, int(num_channels + divisor / 2) // divisor * divisor)

    # Make sure that round down does not go down by more than 10%.
    if new_dim < 0.9 * num_channels:
        new_dim += divisor

    return int(new_dim)


def correct_pad(kernel_size: Union[int, Tuple], adjust: bool = True):
    r"""
    Utility function to get the tuple padding value for the depthwise convolution.

    Args:
        kernel_size (`int` or `tuple`):
            Kernel size of the convolution layers.
        adjust (`bool`, *optional*, defaults to `True`):
            Adjusts padding value to apply to right and bottom sides of the input.
    """
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)

    correct = (kernel_size[0] // 2, kernel_size[1] // 2)
    if adjust:
        return (correct[1] - 1, correct[1], correct[0] - 1, correct[0])
    else:
        return (correct[1], correct[1], correct[0], correct[0])


class EfficientNetEmbeddings(nn.Module):
    r"""
    A module that corresponds to the stem module of the original work.
    """

    def __init__(self, config: EfficientNetConfig):
        super().__init__()

        self.out_dim = round_filters(config, 32)
        self.padding = nn.ZeroPad2d(padding=(0, 1, 0, 1))
        self.convolution = nn.Conv2d(
            config.num_channels, self.out_dim, kernel_size=3, stride=2, padding="valid", bias=False
        )
        self.batchnorm = nn.BatchNorm2d(self.out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum)
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        features = self.padding(pixel_values)
        features = self.convolution(features)
        features = self.batchnorm(features)
        features = self.activation(features)

        return features


class EfficientNetDepthwiseConv2d(nn.Conv2d):
    def __init__(
        self,
        in_channels,
        depth_multiplier=1,
        kernel_size=3,
        stride=1,
        padding=0,
        dilation=1,
        bias=True,
        padding_mode="zeros",
    ):
        out_channels = in_channels * depth_multiplier
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            bias=bias,
            padding_mode=padding_mode,
        )


class EfficientNetExpansionLayer(nn.Module):
    r"""
    This corresponds to the expansion phase of each block in the original implementation.
    """

    def __init__(self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int):
        super().__init__()
        self.expand_conv = nn.Conv2d(
            in_channels=in_dim,
            out_channels=out_dim,
            kernel_size=1,
            padding="same",
            bias=False,
        )
        self.expand_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps)
        self.expand_act = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        # Expand phase
        hidden_states = self.expand_conv(hidden_states)
        hidden_states = self.expand_bn(hidden_states)
        hidden_states = self.expand_act(hidden_states)

        return hidden_states


class EfficientNetDepthwiseLayer(nn.Module):
    r"""
    This corresponds to the depthwise convolution phase of each block in the original implementation.
    """

    def __init__(
        self,
        config: EfficientNetConfig,
        in_dim: int,
        stride: int,
        kernel_size: int,
        adjust_padding: bool,
    ):
        super().__init__()
        self.stride = stride
        conv_pad = "valid" if self.stride == 2 else "same"
        padding = correct_pad(kernel_size, adjust=adjust_padding)

        self.depthwise_conv_pad = nn.ZeroPad2d(padding=padding)
        self.depthwise_conv = EfficientNetDepthwiseConv2d(
            in_dim, kernel_size=kernel_size, stride=stride, padding=conv_pad, bias=False
        )
        self.depthwise_norm = nn.BatchNorm2d(
            num_features=in_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
        )
        self.depthwise_act = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        # Depthwise convolution
        if self.stride == 2:
            hidden_states = self.depthwise_conv_pad(hidden_states)

        hidden_states = self.depthwise_conv(hidden_states)
        hidden_states = self.depthwise_norm(hidden_states)
        hidden_states = self.depthwise_act(hidden_states)

        return hidden_states


class EfficientNetSqueezeExciteLayer(nn.Module):
    r"""
    This corresponds to the Squeeze and Excitement phase of each block in the original implementation.
    """

    def __init__(self, config: EfficientNetConfig, in_dim: int, expand_dim: int, expand: bool = False):
        super().__init__()
        self.dim = expand_dim if expand else in_dim
        self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio))

        self.squeeze = nn.AdaptiveAvgPool2d(output_size=1)
        self.reduce = nn.Conv2d(
            in_channels=self.dim,
            out_channels=self.dim_se,
            kernel_size=1,
            padding="same",
        )
        self.expand = nn.Conv2d(
            in_channels=self.dim_se,
            out_channels=self.dim,
            kernel_size=1,
            padding="same",
        )
        self.act_reduce = ACT2FN[config.hidden_act]
        self.act_expand = nn.Sigmoid()

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        inputs = hidden_states
        hidden_states = self.squeeze(hidden_states)
        hidden_states = self.reduce(hidden_states)
        hidden_states = self.act_reduce(hidden_states)

        hidden_states = self.expand(hidden_states)
        hidden_states = self.act_expand(hidden_states)
        hidden_states = torch.mul(inputs, hidden_states)

        return hidden_states


class EfficientNetFinalBlockLayer(nn.Module):
    r"""
    This corresponds to the final phase of each block in the original implementation.
    """

    def __init__(
        self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int, drop_rate: float, id_skip: bool
    ):
        super().__init__()
        self.apply_dropout = stride == 1 and not id_skip
        self.project_conv = nn.Conv2d(
            in_channels=in_dim,
            out_channels=out_dim,
            kernel_size=1,
            padding="same",
            bias=False,
        )
        self.project_bn = nn.BatchNorm2d(
            num_features=out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
        )
        self.dropout = nn.Dropout(p=drop_rate)

    def forward(self, embeddings: torch.FloatTensor, hidden_states: torch.FloatTensor) -> torch.Tensor:
        hidden_states = self.project_conv(hidden_states)
        hidden_states = self.project_bn(hidden_states)

        if self.apply_dropout:
            hidden_states = self.dropout(hidden_states)
            hidden_states = hidden_states + embeddings

        return hidden_states


class EfficientNetBlock(nn.Module):
    r"""
    This corresponds to the expansion and depthwise convolution phase of each block in the original implementation.

    Args:
        config ([`EfficientNetConfig`]):
            Model configuration class.
        in_dim (`int`):
            Number of input channels.
        out_dim (`int`):
            Number of output channels.
        stride (`int`):
            Stride size to be used in convolution layers.
        expand_ratio (`int`):
            Expand ratio to set the output dimensions for the expansion and squeeze-excite layers.
        kernel_size (`int`):
            Kernel size for the depthwise convolution layer.
        drop_rate (`float`):
            Dropout rate to be used in the final phase of each block.
        id_skip (`bool`):
            Whether to apply dropout and sum the final hidden states with the input embeddings during the final phase
            of each block. Set to `True` for the first block of each stage.
        adjust_padding (`bool`):
            Whether to apply padding to only right and bottom side of the input kernel before the depthwise convolution
            operation, set to `True` for inputs with odd input sizes.
    """

    def __init__(
        self,
        config: EfficientNetConfig,
        in_dim: int,
        out_dim: int,
        stride: int,
        expand_ratio: int,
        kernel_size: int,
        drop_rate: float,
        id_skip: bool,
        adjust_padding: bool,
    ):
        super().__init__()
        self.expand_ratio = expand_ratio
        self.expand = True if self.expand_ratio != 1 else False
        expand_in_dim = in_dim * expand_ratio

        if self.expand:
            self.expansion = EfficientNetExpansionLayer(
                config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride
            )

        self.depthwise_conv = EfficientNetDepthwiseLayer(
            config=config,
            in_dim=expand_in_dim if self.expand else in_dim,
            stride=stride,
            kernel_size=kernel_size,
            adjust_padding=adjust_padding,
        )
        self.squeeze_excite = EfficientNetSqueezeExciteLayer(
            config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand
        )
        self.projection = EfficientNetFinalBlockLayer(
            config=config,
            in_dim=expand_in_dim if self.expand else in_dim,
            out_dim=out_dim,
            stride=stride,
            drop_rate=drop_rate,
            id_skip=id_skip,
        )

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        embeddings = hidden_states
        # Expansion and depthwise convolution phase
        if self.expand_ratio != 1:
            hidden_states = self.expansion(hidden_states)
        hidden_states = self.depthwise_conv(hidden_states)

        # Squeeze and excite phase
        hidden_states = self.squeeze_excite(hidden_states)
        hidden_states = self.projection(embeddings, hidden_states)
        return hidden_states


class EfficientNetEncoder(nn.Module):
    r"""
    Forward propogates the embeddings through each EfficientNet block.

    Args:
        config ([`EfficientNetConfig`]):
            Model configuration class.
    """

    def __init__(self, config: EfficientNetConfig):
        super().__init__()
        self.config = config
        self.depth_coefficient = config.depth_coefficient

        def round_repeats(repeats):
            # Round number of block repeats based on depth multiplier.
            return int(math.ceil(self.depth_coefficient * repeats))

        num_base_blocks = len(config.in_channels)
        num_blocks = sum(round_repeats(n) for n in config.num_block_repeats)

        curr_block_num = 0
        blocks = []
        for i in range(num_base_blocks):
            in_dim = round_filters(config, config.in_channels[i])
            out_dim = round_filters(config, config.out_channels[i])
            stride = config.strides[i]
            kernel_size = config.kernel_sizes[i]
            expand_ratio = config.expand_ratios[i]

            for j in range(round_repeats(config.num_block_repeats[i])):
                id_skip = True if j == 0 else False
                stride = 1 if j > 0 else stride
                in_dim = out_dim if j > 0 else in_dim
                adjust_padding = False if curr_block_num in config.depthwise_padding else True
                drop_rate = config.drop_connect_rate * curr_block_num / num_blocks

                block = EfficientNetBlock(
                    config=config,
                    in_dim=in_dim,
                    out_dim=out_dim,
                    stride=stride,
                    kernel_size=kernel_size,
                    expand_ratio=expand_ratio,
                    drop_rate=drop_rate,
                    id_skip=id_skip,
                    adjust_padding=adjust_padding,
                )
                blocks.append(block)
                curr_block_num += 1

        self.blocks = nn.ModuleList(blocks)
        self.top_conv = nn.Conv2d(
            in_channels=out_dim,
            out_channels=round_filters(config, 1280),
            kernel_size=1,
            padding="same",
            bias=False,
        )
        self.top_bn = nn.BatchNorm2d(
            num_features=config.hidden_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
        )
        self.top_activation = ACT2FN[config.hidden_act]

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> BaseModelOutputWithNoAttention:
        all_hidden_states = (hidden_states,) if output_hidden_states else None

        for block in self.blocks:
            hidden_states = block(hidden_states)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

        hidden_states = self.top_conv(hidden_states)
        hidden_states = self.top_bn(hidden_states)
        hidden_states = self.top_activation(hidden_states)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
        )


class EfficientNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = EfficientNetConfig
    base_model_prefix = "efficientnet"
    main_input_name = "pixel_values"
    _no_split_modules = []

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


@add_start_docstrings(
    "The bare EfficientNet model outputting raw features without any specific head on top.",
    EFFICIENTNET_START_DOCSTRING,
)
class EfficientNetModel(EfficientNetPreTrainedModel):
    def __init__(self, config: EfficientNetConfig):
        super().__init__(config)
        self.config = config
        self.embeddings = EfficientNetEmbeddings(config)
        self.encoder = EfficientNetEncoder(config)

        # Final pooling layer
        if config.pooling_type == "mean":
            self.pooler = nn.AvgPool2d(config.hidden_dim, ceil_mode=True)
        elif config.pooling_type == "max":
            self.pooler = nn.MaxPool2d(config.hidden_dim, ceil_mode=True)
        else:
            raise ValueError(f"config.pooling must be one of ['mean', 'max'] got {config.pooling}")

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(EFFICIENTNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: torch.FloatTensor = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output = self.embeddings(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Apply pooling
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Reshape (batch_size, 1280, 1 , 1) -> (batch_size, 1280)
        pooled_output = pooled_output.reshape(pooled_output.shape[:2])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    EfficientNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g.
    for ImageNet.
    """,
    EFFICIENTNET_START_DOCSTRING,
)
class EfficientNetForImageClassification(EfficientNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.efficientnet = EfficientNetModel(config)
        # Classifier head
        self.dropout = nn.Dropout(p=config.dropout_rate)
        self.classifier = nn.Linear(config.hidden_dim, self.num_labels) if self.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(EFFICIENTNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: torch.FloatTensor = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.efficientnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
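A short end-to-end sketch of how the classification head defined above is typically used. The checkpoint and expected label mirror the `_IMAGE_CLASS_CHECKPOINT` / `_IMAGE_CLASS_EXPECTED_OUTPUT` docstring constants; everything else (the random input image in particular) is an assumption for illustration.

```python
# Illustrative inference sketch for EfficientNetForImageClassification above.
import numpy as np
import torch
from transformers import AutoImageProcessor, EfficientNetForImageClassification

processor = AutoImageProcessor.from_pretrained("google/efficientnet-b7")
model = EfficientNetForImageClassification.from_pretrained("google/efficientnet-b7")

image = np.random.randint(0, 256, size=(600, 600, 3), dtype=np.uint8)  # placeholder image
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# Highest-scoring ImageNet class; the docstring example expects "tabby, tabby cat"
# for a real cat photo rather than random noise.
predicted_label = logits.argmax(-1).item()
print(model.config.id2label[predicted_label])
```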
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/__init__.py
ADDED
@@ -0,0 +1,90 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
    is_torchaudio_available,
)


_import_structure = {
    "configuration_musicgen_melody": [
        "MUSICGEN_MELODY_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MusicgenMelodyConfig",
        "MusicgenMelodyDecoderConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_musicgen_melody"] = [
        "MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MusicgenMelodyForConditionalGeneration",
        "MusicgenMelodyForCausalLM",
        "MusicgenMelodyModel",
        "MusicgenMelodyPreTrainedModel",
    ]

try:
    if not is_torchaudio_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_musicgen_melody"] = ["MusicgenMelodyFeatureExtractor"]
    _import_structure["processing_musicgen_melody"] = ["MusicgenMelodyProcessor"]


if TYPE_CHECKING:
    from .configuration_musicgen_melody import (
        MUSICGEN_MELODY_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MusicgenMelodyConfig,
        MusicgenMelodyDecoderConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_musicgen_melody import (
            MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST,
            MusicgenMelodyForCausalLM,
            MusicgenMelodyForConditionalGeneration,
            MusicgenMelodyModel,
            MusicgenMelodyPreTrainedModel,
        )

    try:
        if not is_torchaudio_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_musicgen_melody import MusicgenMelodyFeatureExtractor
        from .processing_musicgen_melody import MusicgenMelodyProcessor


else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
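The `_LazyModule` indirection above means the package's public names are only imported on first attribute access rather than at package import time, and the optional-dependency guards decide which names get registered. A minimal sketch of what that looks like from the caller's side (illustrative only, and assuming torch is installed so the modeling symbols are available):

```python
# Illustrative: importing the subpackage is cheap; heavy submodules load lazily.
import transformers.models.musicgen_melody as musicgen_melody

# Accessing the attribute triggers _LazyModule to import modeling_musicgen_melody,
# which only exists because the is_torch_available() guard above registered it.
model_cls = musicgen_melody.MusicgenMelodyForConditionalGeneration
print(model_cls.__name__)
```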
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.44 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/__pycache__/configuration_musicgen_melody.cpython-310.pyc
ADDED
Binary file (10.9 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/__pycache__/convert_musicgen_melody_transformers.cpython-310.pyc
ADDED
Binary file (7.41 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/__pycache__/feature_extraction_musicgen_melody.cpython-310.pyc
ADDED
Binary file (12.4 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/__pycache__/modeling_musicgen_melody.cpython-310.pyc
ADDED
Binary file (81.6 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/__pycache__/processing_musicgen_melody.cpython-310.pyc
ADDED
Binary file (7.33 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/configuration_musicgen_melody.py
ADDED
@@ -0,0 +1,271 @@
# coding=utf-8
# Copyright 2024 Meta AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Musicgen Melody model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


logger = logging.get_logger(__name__)

from ..deprecated._archive_maps import MUSICGEN_MELODY_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class MusicgenMelodyDecoderConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`MusicgenMelodyDecoder`]. It is used to instantiate a
    Musicgen Melody decoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Musicgen Melody
    [facebook/musicgen-melody](https://huggingface.co/facebook/musicgen-melody) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 2048):
            Vocabulary size of the MusicgenMelodyDecoder model. Defines the number of different tokens that can be
            represented by the `inputs_ids` passed when calling [`MusicgenMelodyDecoder`].
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically, set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        num_hidden_layers (`int`, *optional*, defaults to 24):
            Number of decoder layers.
        ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer block.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer block.
        layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether the model should return the last key/values attentions (not used by all models)
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the decoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, text_encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        initializer_factor (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by diving by sqrt(hidden_size).
        num_codebooks (`int`, *optional*, defaults to 4):
            The number of parallel codebooks forwarded to the model.
        audio_channels (`int`, *optional*, defaults to 1):
            Number of audio channels used by the model (either mono or stereo). Stereo models generate a separate
            audio stream for the left/right output channels. Mono models generate a single audio stream output.
        pad_token_id (`int`, *optional*, defaults to 2048): The id of the *padding* token.
        bos_token_id (`int`, *optional*, defaults to 2048): The id of the *beginning-of-sequence* token.
        eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie word embeddings with the text encoder.
    """

    model_type = "musicgen_melody_decoder"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=2048,
        max_position_embeddings=2048,
        num_hidden_layers=24,
        ffn_dim=4096,
        num_attention_heads=16,
        layerdrop=0.0,
        use_cache=True,
        activation_function="gelu",
        hidden_size=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        initializer_factor=0.02,
        scale_embedding=False,
        num_codebooks=4,
        audio_channels=1,
        pad_token_id=2048,
        bos_token_id=2048,
        eos_token_id=None,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.ffn_dim = ffn_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.initializer_factor = initializer_factor
        self.layerdrop = layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.num_codebooks = num_codebooks

        if audio_channels not in [1, 2]:
            raise ValueError(f"Expected 1 (mono) or 2 (stereo) audio channels, got {audio_channels} channels.")
        self.audio_channels = audio_channels

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )


class MusicgenMelodyConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MusicgenMelodyModel`]. It is used to instantiate a
    Musicgen Melody model according to the specified arguments, defining the text encoder, audio encoder and Musicgen Melody decoder
    configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the Musicgen Melody
    [facebook/musicgen-melody](https://huggingface.co/facebook/musicgen-melody) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_chroma (`int`, *optional*, defaults to 12): Number of chroma bins to use.
        chroma_length (`int`, *optional*, defaults to 235):
            Maximum chroma duration if audio is used to condition the model. Corresponds to the maximum duration used during training.
        kwargs (*optional*):
            Dictionary of keyword arguments. Notably:

                - **text_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that
                  defines the text encoder config.
                - **audio_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that
                  defines the audio encoder config.
                - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
                  the decoder config.

    Example:

    ```python
    >>> from transformers import (
    ...     MusicgenMelodyConfig,
    ...     MusicgenMelodyDecoderConfig,
    ...     T5Config,
    ...     EncodecConfig,
    ...     MusicgenMelodyForConditionalGeneration,
    ... )

    >>> # Initializing text encoder, audio encoder, and decoder model configurations
    >>> text_encoder_config = T5Config()
    >>> audio_encoder_config = EncodecConfig()
    >>> decoder_config = MusicgenMelodyDecoderConfig()

    >>> configuration = MusicgenMelodyConfig.from_sub_models_config(
    ...     text_encoder_config, audio_encoder_config, decoder_config
    ... )

    >>> # Initializing a MusicgenMelodyForConditionalGeneration (with random weights) from the facebook/musicgen-melody style configuration
    >>> model = MusicgenMelodyForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    >>> config_text_encoder = model.config.text_encoder
    >>> config_audio_encoder = model.config.audio_encoder
    >>> config_decoder = model.config.decoder

    >>> # Saving the model, including its configuration
    >>> model.save_pretrained("musicgen_melody-model")

    >>> # loading model and config from pretrained folder
    >>> musicgen_melody_config = MusicgenMelodyConfig.from_pretrained("musicgen_melody-model")
    >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("musicgen_melody-model", config=musicgen_melody_config)
    ```"""

    model_type = "musicgen_melody"
    is_composition = True

    def __init__(
        self,
        num_chroma=12,
        chroma_length=235,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if "text_encoder" not in kwargs or "audio_encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError("Config has to be initialized with text_encoder, audio_encoder and decoder config")

        text_encoder_config = kwargs.pop("text_encoder")
        text_encoder_model_type = text_encoder_config.pop("model_type")

        audio_encoder_config = kwargs.pop("audio_encoder")
        audio_encoder_model_type = audio_encoder_config.pop("model_type")

        decoder_config = kwargs.pop("decoder")

        self.text_encoder = AutoConfig.for_model(text_encoder_model_type, **text_encoder_config)
        self.audio_encoder = AutoConfig.for_model(audio_encoder_model_type, **audio_encoder_config)
        self.decoder = MusicgenMelodyDecoderConfig(**decoder_config)
        self.is_encoder_decoder = False

        self.num_chroma = num_chroma
        self.chroma_length = chroma_length

    @classmethod
    def from_sub_models_config(
        cls,
        text_encoder_config: PretrainedConfig,
        audio_encoder_config: PretrainedConfig,
        decoder_config: MusicgenMelodyDecoderConfig,
        **kwargs,
    ):
        r"""
        Instantiate a [`MusicgenMelodyConfig`] (or a derived class) from text encoder, audio encoder and decoder
        configurations.

        Returns:
            [`MusicgenMelodyConfig`]: An instance of a configuration object
        """

        return cls(
            text_encoder=text_encoder_config.to_dict(),
            audio_encoder=audio_encoder_config.to_dict(),
            decoder=decoder_config.to_dict(),
            **kwargs,
        )

    @property
    # This is a property because you might want to change the codec model on the fly
    def sampling_rate(self):
        return self.audio_encoder.sampling_rate

    @property
    def _attn_implementation(self):
        # This property is made private for now (as it cannot be changed and a PreTrainedModel.use_attn_implementation method needs to be implemented.)
        if hasattr(self, "_attn_implementation_internal"):
            if self._attn_implementation_internal is None:
                # `config.attn_implementation` should never be None, for backward compatibility.
                return "eager"
            else:
                return self._attn_implementation_internal
        else:
            return "eager"

    @_attn_implementation.setter
    def _attn_implementation(self, value):
        self._attn_implementation_internal = value
        self.decoder._attn_implementation = value
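A compact sketch of composing the configuration defined above from its three sub-configs. It follows the docstring example with default sub-configs, so treat the specific values printed at the end as illustrative rather than guaranteed.

```python
# Illustrative composition of MusicgenMelodyConfig from text encoder, audio encoder
# and decoder sub-configs, mirroring from_sub_models_config() above.
from transformers import EncodecConfig, T5Config
from transformers.models.musicgen_melody.configuration_musicgen_melody import (
    MusicgenMelodyConfig,
    MusicgenMelodyDecoderConfig,
)

decoder_config = MusicgenMelodyDecoderConfig(num_codebooks=4, audio_channels=1)
config = MusicgenMelodyConfig.from_sub_models_config(
    text_encoder_config=T5Config(),
    audio_encoder_config=EncodecConfig(),
    decoder_config=decoder_config,
)

# sampling_rate is proxied from the audio encoder sub-config (see the property above),
# while the decoder keeps its own codebook settings.
print(config.sampling_rate, config.decoder.num_codebooks)
```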
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/convert_musicgen_melody_transformers.py
ADDED
@@ -0,0 +1,266 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
"""Convert Musicgen Melody checkpoints from the original repository."""
|
16 |
+
import argparse
|
17 |
+
from pathlib import Path
|
18 |
+
from typing import Dict, OrderedDict, Tuple
|
19 |
+
|
20 |
+
import torch
|
21 |
+
from audiocraft.models import MusicGen
|
22 |
+
|
23 |
+
from transformers import (
|
24 |
+
AutoTokenizer,
|
25 |
+
EncodecModel,
|
26 |
+
T5EncoderModel,
|
27 |
+
)
|
28 |
+
from transformers.models.musicgen_melody.configuration_musicgen_melody import MusicgenMelodyDecoderConfig
|
29 |
+
from transformers.models.musicgen_melody.feature_extraction_musicgen_melody import MusicgenMelodyFeatureExtractor
|
30 |
+
from transformers.models.musicgen_melody.modeling_musicgen_melody import (
|
31 |
+
MusicgenMelodyForCausalLM,
|
32 |
+
MusicgenMelodyForConditionalGeneration,
|
33 |
+
)
|
34 |
+
from transformers.models.musicgen_melody.processing_musicgen_melody import MusicgenMelodyProcessor
|
35 |
+
from transformers.utils import logging
|
36 |
+
|
37 |
+
|
38 |
+
logging.set_verbosity_info()
|
39 |
+
logger = logging.get_logger(__name__)
|
40 |
+
|
41 |
+
|
42 |
+
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
|
43 |
+
EXPECTED_ADDITIONAL_KEYS = ["condition_provider.conditioners.self_wav.chroma.spec.window"]
|
44 |
+
|
45 |
+
|
46 |
+
def rename_keys(name):
|
47 |
+
if "emb" in name:
|
48 |
+
name = name.replace("emb", "model.decoder.embed_tokens")
|
49 |
+
if "transformer" in name:
|
50 |
+
name = name.replace("transformer", "model.decoder")
|
51 |
+
if "cross_attention" in name:
|
52 |
+
name = name.replace("cross_attention", "encoder_attn")
|
53 |
+
if "linear1" in name:
|
54 |
+
name = name.replace("linear1", "fc1")
|
55 |
+
if "linear2" in name:
|
56 |
+
name = name.replace("linear2", "fc2")
|
57 |
+
if "norm1" in name:
|
58 |
+
name = name.replace("norm1", "self_attn_layer_norm")
|
59 |
+
if "norm_cross" in name:
|
60 |
+
name = name.replace("norm_cross", "encoder_attn_layer_norm")
|
61 |
+
if "norm2" in name:
|
62 |
+
name = name.replace("norm2", "final_layer_norm")
|
63 |
+
if "out_norm" in name:
|
64 |
+
name = name.replace("out_norm", "model.decoder.layer_norm")
|
65 |
+
if "linears" in name:
|
66 |
+
name = name.replace("linears", "lm_heads")
|
67 |
+
if "condition_provider.conditioners.description.output_proj" in name:
|
68 |
+
name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
|
69 |
+
if "condition_provider.conditioners.self_wav.output_proj" in name:
|
70 |
+
name = name.replace("condition_provider.conditioners.self_wav.output_proj", "audio_enc_to_dec_proj")
|
71 |
+
return name
|
72 |
+
|
73 |
+
|
74 |
+
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
|
75 |
+
"""Function that takes the fairseq MusicgenMelody state dict and renames it according to the HF
|
76 |
+
module names. It further partitions the state dict into the decoder (LM) state dict, and that for the
|
77 |
+
text encoder projection and for the audio encoder projection."""
|
78 |
+
keys = list(state_dict.keys())
|
79 |
+
enc_dec_proj_state_dict = {}
|
80 |
+
audio_enc_to_dec_proj_state_dict = {}
|
81 |
+
for key in keys:
|
82 |
+
val = state_dict.pop(key)
|
83 |
+
key = rename_keys(key)
|
84 |
+
if "in_proj_weight" in key:
|
85 |
+
# split fused qkv proj
|
86 |
+
state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
|
87 |
+
state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
|
88 |
+
state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
|
89 |
+
elif "audio_enc_to_dec_proj" in key:
|
90 |
+
audio_enc_to_dec_proj_state_dict[key[len("audio_enc_to_dec_proj.") :]] = val
|
91 |
+
elif "enc_to_dec_proj" in key:
|
92 |
+
enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
|
93 |
+
else:
|
94 |
+
state_dict[key] = val
|
95 |
+
return state_dict, enc_dec_proj_state_dict, audio_enc_to_dec_proj_state_dict
|
96 |
+
|
97 |
+
|
98 |
+
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenMelodyDecoderConfig:
|
99 |
+
if checkpoint == "facebook/musicgen-melody" or checkpoint == "facebook/musicgen-stereo-melody":
|
100 |
+
hidden_size = 1536
|
101 |
+
num_hidden_layers = 48
|
102 |
+
num_attention_heads = 24
|
103 |
+
elif checkpoint == "facebook/musicgen-melody-large" or checkpoint == "facebook/musicgen-stereo-melody-large":
|
104 |
+
hidden_size = 2048
|
105 |
+
num_hidden_layers = 48
|
106 |
+
num_attention_heads = 32
|
107 |
+
else:
|
108 |
+
raise ValueError(
|
109 |
+
"Checkpoint should be one of `['facebook/musicgen-melody', 'facebook/musicgen-melody-large']` for the mono checkpoints, "
|
110 |
+
"or `['facebook/musicgen-stereo-melody', 'facebook/musicgen-stereo-melody-large']` "
|
111 |
+
f"for the stereo checkpoints, got {checkpoint}."
|
112 |
+
)
|
113 |
+
|
114 |
+
if "stereo" in checkpoint:
|
115 |
+
audio_channels = 2
|
116 |
+
num_codebooks = 8
|
117 |
+
else:
|
118 |
+
audio_channels = 1
|
119 |
+
num_codebooks = 4
|
120 |
+
|
121 |
+
config = MusicgenMelodyDecoderConfig(
|
122 |
+
hidden_size=hidden_size,
|
123 |
+
ffn_dim=hidden_size * 4,
|
124 |
+
num_hidden_layers=num_hidden_layers,
|
125 |
+
num_attention_heads=num_attention_heads,
|
126 |
+
num_codebooks=num_codebooks,
|
127 |
+
audio_channels=audio_channels,
|
128 |
+
)
|
129 |
+
return config
|
130 |
+
|
131 |
+
|
132 |
+
@torch.no_grad()
|
133 |
+
def convert_musicgen_melody_checkpoint(
|
134 |
+
checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu", test_same_output=False
|
135 |
+
):
|
136 |
+
fairseq_model = MusicGen.get_pretrained(checkpoint, device=args.device)
|
137 |
+
decoder_config = decoder_config_from_checkpoint(checkpoint)
|
138 |
+
|
139 |
+
decoder_state_dict = fairseq_model.lm.state_dict()
|
140 |
+
decoder_state_dict, enc_dec_proj_state_dict, audio_enc_to_dec_proj_state_dict = rename_state_dict(
|
141 |
+
decoder_state_dict, hidden_size=decoder_config.hidden_size
|
142 |
+
)
|
143 |
+
|
144 |
+
text_encoder = T5EncoderModel.from_pretrained("t5-base")
|
145 |
+
audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
|
146 |
+
decoder = MusicgenMelodyForCausalLM(decoder_config).eval()
|
147 |
+
|
148 |
+
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
|
149 |
+
missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
|
150 |
+
|
151 |
+
for key in missing_keys.copy():
|
152 |
+
if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
|
153 |
+
missing_keys.remove(key)
|
154 |
+
|
155 |
+
for key in unexpected_keys.copy():
|
156 |
+
if key in EXPECTED_ADDITIONAL_KEYS:
|
157 |
+
unexpected_keys.remove(key)
|
158 |
+
|
159 |
+
if len(missing_keys) > 0:
|
160 |
+
raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
|
161 |
+
|
162 |
+
if len(unexpected_keys) > 0:
|
163 |
+
raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
|
164 |
+
|
165 |
+
# init the composite model
|
166 |
+
model = MusicgenMelodyForConditionalGeneration(
|
167 |
+
text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder
|
168 |
+
).to(args.device)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # load the pre-trained audio encoder projection (from the decoder state dict)
    model.audio_enc_to_dec_proj.load_state_dict(audio_enc_to_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 2 * decoder_config.num_codebooks, dtype=torch.long).reshape(2, -1).to(device)
    decoder_input_ids = input_ids.reshape(2 * decoder_config.num_codebooks, -1).to(device)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    output_length = 1 + input_ids.shape[1] + model.config.chroma_length
    if logits.shape != (2 * decoder_config.num_codebooks, output_length, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = MusicgenMelodyFeatureExtractor()

    processor = MusicgenMelodyProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if test_same_output:
        # check that we get the same output as the original model
        decoder_input_ids = torch.ones_like(decoder_input_ids).to(device) * model.generation_config.pad_token_id
        with torch.no_grad():
            decoder_input_ids = decoder_input_ids[: decoder_config.num_codebooks]
            inputs = processor(text=["gen"], return_tensors="pt", padding=True).to(device)
            logits = model(**inputs, decoder_input_ids=decoder_input_ids).logits

            attributes, prompt_tokens = fairseq_model._prepare_tokens_and_attributes(["gen"], None)
            original_logits = fairseq_model.lm.forward(
                decoder_input_ids.reshape(1, decoder_config.num_codebooks, -1), attributes
            )

            torch.testing.assert_close(
                original_logits.squeeze(2).reshape(decoder_config.num_codebooks, -1),
                logits[:, -1],
                rtol=1e-5,
                atol=5e-5,
            )

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id, create_pr=True)
        processor.push_to_hub(repo_id, create_pr=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="facebook/musicgen-melody",
        type=str,
        help="Checkpoint size of the Musicgen Melody model you'd like to convert. Can be one of: "
        "`['facebook/musicgen-melody', 'facebook/musicgen-melody-large']` for the mono checkpoints, or "
        "`['facebook/musicgen-stereo-melody', 'facebook/musicgen-stereo-melody-large']` "
        "for the stereo checkpoints.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default="musicgen-melody",
        type=str,
        help="Where to upload the converted model on the 🤗 hub.",
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )
    parser.add_argument("--test_same_output", default=False, type=bool, help="If `True`, test if same output logits.")

    args = parser.parse_args()
    convert_musicgen_melody_checkpoint(
        args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device, args.test_same_output
    )
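For orientation, here is a hedged sketch (not part of the diff above) of how this conversion entry point could be called directly from Python rather than via the CLI; the import path and output folder are assumptions, and `audiocraft` plus the original fairseq weights must be available in the environment:

```python
# Assumed usage sketch only - not part of the converted file.
# The import path mirrors where the script lives inside the installed package;
# running it requires the `audiocraft` dependency used by MusicGen.get_pretrained.
from transformers.models.musicgen_melody.convert_musicgen_melody_to_hf import (
    convert_musicgen_melody_checkpoint,
)

convert_musicgen_melody_checkpoint(
    checkpoint="facebook/musicgen-melody",       # mono checkpoint named in the help text above
    pytorch_dump_folder="./musicgen-melody-hf",  # hypothetical local output directory
    repo_id=None,                                # skip pushing to the Hub
    device="cpu",
    test_same_output=False,                      # set True to compare logits with the fairseq model
)
```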
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/modeling_musicgen_melody.py
ADDED
The diff for this file is too large to render.
See raw diff
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/processing_musicgen_melody.py
ADDED
@@ -0,0 +1,174 @@
# coding=utf-8
# Copyright 2024 Meta AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Text/audio processor class for MusicGen Melody
"""
from typing import List, Optional

import numpy as np

from ...processing_utils import ProcessorMixin
from ...utils import to_numpy


class MusicgenMelodyProcessor(ProcessorMixin):
    r"""
    Constructs a MusicGen Melody processor which wraps a Wav2Vec2 feature extractor - for raw audio waveform processing - and a T5 tokenizer into a single processor
    class.

    [`MusicgenProcessor`] offers all the functionalities of [`MusicgenMelodyFeatureExtractor`] and [`T5Tokenizer`]. See
    [`~MusicgenProcessor.__call__`] and [`~MusicgenProcessor.decode`] for more information.

    Args:
        feature_extractor (`MusicgenMelodyFeatureExtractor`):
            An instance of [`MusicgenMelodyFeatureExtractor`]. The feature extractor is a required input.
        tokenizer (`T5Tokenizer`):
            An instance of [`T5Tokenizer`]. The tokenizer is a required input.
    """

    feature_extractor_class = "MusicgenMelodyFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    # Copied from transformers.models.musicgen.processing_musicgen.MusicgenProcessor.get_decoder_prompt_ids
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, audio=None, text=None, **kwargs):
        """
        Main method to prepare for the model one or several sequences(s) and audio(s). This method forwards the `audio`
        and `kwargs` arguments to MusicgenMelodyFeatureExtractor's [`~MusicgenMelodyFeatureExtractor.__call__`] if `audio` is not
        `None` to pre-process the audio. It also forwards the `text` and `kwargs` arguments to
        PreTrainedTokenizer's [`~PreTrainedTokenizer.__call__`] if `text` is not `None`. Please refer to the docstring of the above two methods for more information.

        Args:
            audio (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The audio or batch of audios to be prepared. Each audio can be NumPy array or PyTorch tensor. In case
                of a NumPy array/PyTorch tensor, each audio should be a mono-stereo signal of shape (T), where T is the sample length of the audio.
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            kwargs (*optional*):
                Remaining dictionary of keyword arguments that will be passed to the feature extractor and/or the
                tokenizer.
        Returns:
            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **input_features** -- Audio input features to be fed to a model. Returned when `audio` is not `None`.
            - **attention_mask** -- List of token indices specifying which tokens should be attended to by the model when `text` is not `None`.
              When only `audio` is specified, returns the timestamps attention mask.
        """

        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, sampling_rate=sampling_rate, **kwargs)

        if text is None:
            return audio_inputs
        elif audio is None:
            return inputs
        else:
            inputs["input_features"] = audio_inputs["input_features"]
            return inputs

    # Copied from transformers.models.musicgen.processing_musicgen.MusicgenProcessor.batch_decode with padding_mask->attention_mask
    def batch_decode(self, *args, **kwargs):
        """
        This method is used to decode either batches of audio outputs from the MusicGen model, or batches of token ids
        from the tokenizer. In the case of decoding token ids, this method forwards all its arguments to T5Tokenizer's
        [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information.
        """
        audio_values = kwargs.pop("audio", None)
        attention_mask = kwargs.pop("attention_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, attention_mask=attention_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    # Copied from transformers.models.musicgen.processing_musicgen.MusicgenProcessor.decode
    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to T5Tokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the
        docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    # Copied from transformers.models.musicgen.processing_musicgen.MusicgenProcessor._decode_audio with padding_mask->attention_mask
    def _decode_audio(self, audio_values, attention_mask: Optional = None) -> List[np.ndarray]:
        """
        This method strips any padding from the audio values to return a list of numpy audio arrays.
        """
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if attention_mask is None:
            return list(audio_values)

        attention_mask = to_numpy(attention_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - attention_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        attention_mask = np.pad(attention_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                attention_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values

    def get_unconditional_inputs(self, num_samples=1, return_tensors="pt"):
        """
        Helper function to get null inputs for unconditional generation, enabling the model to be used without the
        feature extractor or tokenizer.

        Args:
            num_samples (int, *optional*):
                Number of audio samples to unconditionally generate.

        Example:
        ```python
        >>> from transformers import MusicgenMelodyForConditionalGeneration, MusicgenMelodyProcessor

        >>> model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody")

        >>> # get the unconditional (or 'null') inputs for the model
        >>> processor = MusicgenMelodyProcessor.from_pretrained("facebook/musicgen-melody")
        >>> unconditional_inputs = processor.get_unconditional_inputs(num_samples=1)

        >>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256)
        ```"""
        inputs = self.tokenizer([""] * num_samples, return_tensors=return_tensors, return_attention_mask=True)
        inputs["attention_mask"][:] = 0

        return inputs
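As a brief aside (not part of the file above), a hedged usage sketch of the processor defined here, pairing a text prompt with a reference waveform as described in its `__call__` docstring; the checkpoint id matches the conversion script above, while the dummy audio and prompt are placeholders:

```python
# Illustrative sketch only: pair a text prompt with a reference melody waveform.
# The checkpoint id follows the conversion script; the audio here is dummy data.
import numpy as np
from transformers import MusicgenMelodyProcessor

processor = MusicgenMelodyProcessor.from_pretrained("facebook/musicgen-melody")

melody = np.random.randn(32000).astype(np.float32)  # ~1 second of fake audio at 32 kHz
inputs = processor(
    audio=melody,
    sampling_rate=32000,
    text=["80s pop with driving synth"],
    padding=True,
    return_tensors="pt",
)

# `inputs` now holds `input_ids`/`attention_mask` from the tokenizer and
# `input_features` from the feature extractor, as described in `__call__`.
```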
llmeval-env/lib/python3.10/site-packages/transformers/models/owlv2/__init__.py
ADDED
@@ -0,0 +1,93 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_owlv2": [
        "OWLV2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Owlv2Config",
        "Owlv2TextConfig",
        "Owlv2VisionConfig",
    ],
    "processing_owlv2": ["Owlv2Processor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_owlv2"] = ["Owlv2ImageProcessor"]


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlv2"] = [
        "OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Owlv2Model",
        "Owlv2PreTrainedModel",
        "Owlv2TextModel",
        "Owlv2VisionModel",
        "Owlv2ForObjectDetection",
    ]

if TYPE_CHECKING:
    from .configuration_owlv2 import (
        OWLV2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Owlv2Config,
        Owlv2TextConfig,
        Owlv2VisionConfig,
    )
    from .processing_owlv2 import Owlv2Processor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_owlv2 import Owlv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlv2 import (
            OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Owlv2ForObjectDetection,
            Owlv2Model,
            Owlv2PreTrainedModel,
            Owlv2TextModel,
            Owlv2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
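Finally, a small assumed sketch of what this lazy `__init__.py` enables: the public OWLv2 classes resolve from the subpackage whether imported at type-check time or at runtime, with the torch/vision-dependent modules only loaded on first access (the checkpoint name below is an assumption, not taken from the diff):

```python
# Sketch: the _LazyModule indirection defers the heavy torch/vision imports,
# so `modeling_owlv2` and `image_processing_owlv2` are only loaded when used.
from transformers.models.owlv2 import Owlv2ForObjectDetection, Owlv2Processor

processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16-ensemble")
model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble")
```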