applied-ai-018 committed
Commit 8171625 · verified · 1 parent: 1bf242c

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/modeling_conditional_detr.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/__init__.py +109 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/__init__.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/configuration_efficientformer.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/image_processing_efficientformer.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_efficientformer.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_tf_efficientformer.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/configuration_efficientformer.py +170 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py +252 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/image_processing_efficientformer.py +321 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/modeling_efficientformer.py +803 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/modeling_tf_efficientformer.py +1193 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/__init__.py +75 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/__init__.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/configuration_glpn.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/convert_glpn_to_pytorch.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/feature_extraction_glpn.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/image_processing_glpn.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/modeling_glpn.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/configuration_glpn.py +135 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/convert_glpn_to_pytorch.py +219 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/feature_extraction_glpn.py +33 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/image_processing_glpn.py +233 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/modeling_glpn.py +778 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/__init__.py +85 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/__pycache__/__init__.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/__pycache__/configuration_gpt_neo.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/__pycache__/convert_gpt_neo_mesh_tf_to_pytorch.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/__pycache__/modeling_flax_gpt_neo.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/__pycache__/modeling_gpt_neo.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/configuration_gpt_neo.py +272 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/convert_gpt_neo_mesh_tf_to_pytorch.py +72 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/modeling_flax_gpt_neo.py +684 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/modeling_gpt_neo.py +1346 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/__init__.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/configuration_grounding_dino.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/convert_grounding_dino_to_hf.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/image_processing_grounding_dino.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/modeling_grounding_dino.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/processing_grounding_dino.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/modeling_tf_layoutlm.py +1685 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/tokenization_layoutlm.py +504 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus_x/__init__.py +57 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus_x/__pycache__/__init__.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus_x/__pycache__/configuration_pegasus_x.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus_x/__pycache__/modeling_pegasus_x.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus_x/configuration_pegasus_x.py +177 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus_x/modeling_pegasus_x.py +1627 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/udop/__init__.py +98 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/modeling_conditional_detr.cpython-310.pyc ADDED
Binary file (93.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/__init__.py ADDED
@@ -0,0 +1,109 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_tf_available,
+     is_torch_available,
+     is_vision_available,
+ )
+
+
+ _import_structure = {
+     "configuration_efficientformer": [
+         "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
+         "EfficientFormerConfig",
+     ]
+ }
+
+ try:
+     if not is_vision_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_efficientformer"] = [
+         "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "EfficientFormerForImageClassification",
+         "EfficientFormerForImageClassificationWithTeacher",
+         "EfficientFormerModel",
+         "EfficientFormerPreTrainedModel",
+     ]
+
+ try:
+     if not is_tf_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_tf_efficientformer"] = [
+         "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "TFEfficientFormerForImageClassification",
+         "TFEfficientFormerForImageClassificationWithTeacher",
+         "TFEfficientFormerModel",
+         "TFEfficientFormerPreTrainedModel",
+     ]
+
+ if TYPE_CHECKING:
+     from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
+
+     try:
+         if not is_vision_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .image_processing_efficientformer import EfficientFormerImageProcessor
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_efficientformer import (
+             EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+             EfficientFormerForImageClassification,
+             EfficientFormerForImageClassificationWithTeacher,
+             EfficientFormerModel,
+             EfficientFormerPreTrainedModel,
+         )
+     try:
+         if not is_tf_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_tf_efficientformer import (
+             TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+             TFEfficientFormerForImageClassification,
+             TFEfficientFormerForImageClassificationWithTeacher,
+             TFEfficientFormerModel,
+             TFEfficientFormerPreTrainedModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
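For reference, a minimal usage sketch of the lazy-import pattern in this `__init__.py` (assuming the transformers build vendored here is importable): the configuration class is registered unconditionally, while the torch-backed classes only exist in `_import_structure` when `is_torch_available()` is true, so importing a missing backend class should fail cleanly at import time rather than dragging in the heavy dependency.

```python
# Sketch only, assuming this version of transformers is installed.
from transformers.models.efficientformer import EfficientFormerConfig  # always registered

try:
    # Registered only when the torch backend is available (see the try/except above).
    from transformers.models.efficientformer import EfficientFormerModel
except ImportError:
    EfficientFormerModel = None  # torch not installed; only the config is usable
```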
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.74 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/configuration_efficientformer.cpython-310.pyc ADDED
Binary file (6.94 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (6.15 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/image_processing_efficientformer.cpython-310.pyc ADDED
Binary file (12.9 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_efficientformer.cpython-310.pyc ADDED
Binary file (27.9 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/__pycache__/modeling_tf_efficientformer.cpython-310.pyc ADDED
Binary file (37.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/configuration_efficientformer.py ADDED
@@ -0,0 +1,170 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ EfficientFormer model configuration"""
+
+ from typing import List
+
+ from ...configuration_utils import PretrainedConfig
+ from ...utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ from ..deprecated._archive_maps import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class EfficientFormerConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of an [`EfficientFormerModel`]. It is used to
+     instantiate an EfficientFormer model according to the specified arguments, defining the model architecture.
+     Instantiating a configuration with the defaults will yield a similar configuration to that of the EfficientFormer
+     [snap-research/efficientformer-l1](https://huggingface.co/snap-research/efficientformer-l1) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         depths (`List(int)`, *optional*, defaults to `[3, 2, 6, 4]`)
+             Depth of each stage.
+         hidden_sizes (`List(int)`, *optional*, defaults to `[48, 96, 224, 448]`)
+             Dimensionality of each stage.
+         downsamples (`List(bool)`, *optional*, defaults to `[True, True, True, True]`)
+             Whether or not to downsample inputs between two stages.
+         dim (`int`, *optional*, defaults to 448):
+             Number of channels in Meta3D layers
+         key_dim (`int`, *optional*, defaults to 32):
+             The size of the key in meta3D block.
+         attention_ratio (`int`, *optional*, defaults to 4):
+             Ratio of the dimension of the query and value to the dimension of the key in MSHA block
+         resolution (`int`, *optional*, defaults to 7)
+             Size of each patch
+         num_hidden_layers (`int`, *optional*, defaults to 5):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 8):
+             Number of attention heads for each attention layer in the 3D MetaBlock.
+         mlp_expansion_ratio (`int`, *optional*, defaults to 4):
+             Ratio of size of the hidden dimensionality of an MLP to the dimensionality of its input.
+         hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+             The dropout probability for all fully connected layers in the embeddings and encoder.
+         patch_size (`int`, *optional*, defaults to 16):
+             The size (resolution) of each patch.
+         num_channels (`int`, *optional*, defaults to 3):
+             The number of input channels.
+         pool_size (`int`, *optional*, defaults to 3):
+             Kernel size of pooling layers.
+         downsample_patch_size (`int`, *optional*, defaults to 3):
+             The size of patches in downsampling layers.
+         downsample_stride (`int`, *optional*, defaults to 2):
+             The stride of convolution kernels in downsampling layers.
+         downsample_pad (`int`, *optional*, defaults to 1):
+             Padding in downsampling layers.
+         drop_path_rate (`int`, *optional*, defaults to 0):
+             Rate at which to increase dropout probability in DropPath.
+         num_meta3d_blocks (`int`, *optional*, defaults to 1):
+             The number of 3D MetaBlocks in the last stage.
+         distillation (`bool`, *optional*, defaults to `True`):
+             Whether to add a distillation head.
+         use_layer_scale (`bool`, *optional*, defaults to `True`):
+             Whether to scale outputs from token mixers.
+         layer_scale_init_value (`float`, *optional*, defaults to 1e-5):
+             Factor by which outputs from token mixers are scaled.
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"selu"` and `"gelu_new"` are supported.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+             The epsilon used by the layer normalization layers.
+         image_size (`int`, *optional*, defaults to `224`):
+             The size (resolution) of each image.
+
+     Example:
+
+     ```python
+     >>> from transformers import EfficientFormerConfig, EfficientFormerModel
+
+     >>> # Initializing a EfficientFormer efficientformer-l1 style configuration
+     >>> configuration = EfficientFormerConfig()
+
+     >>> # Initializing a EfficientFormerModel (with random weights) from the efficientformer-l3 style configuration
+     >>> model = EfficientFormerModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "efficientformer"
+
+     def __init__(
+         self,
+         depths: List[int] = [3, 2, 6, 4],
+         hidden_sizes: List[int] = [48, 96, 224, 448],
+         downsamples: List[bool] = [True, True, True, True],
+         dim: int = 448,
+         key_dim: int = 32,
+         attention_ratio: int = 4,
+         resolution: int = 7,
+         num_hidden_layers: int = 5,
+         num_attention_heads: int = 8,
+         mlp_expansion_ratio: int = 4,
+         hidden_dropout_prob: float = 0.0,
+         patch_size: int = 16,
+         num_channels: int = 3,
+         pool_size: int = 3,
+         downsample_patch_size: int = 3,
+         downsample_stride: int = 2,
+         downsample_pad: int = 1,
+         drop_path_rate: float = 0.0,
+         num_meta3d_blocks: int = 1,
+         distillation: bool = True,
+         use_layer_scale: bool = True,
+         layer_scale_init_value: float = 1e-5,
+         hidden_act: str = "gelu",
+         initializer_range: float = 0.02,
+         layer_norm_eps: float = 1e-12,
+         image_size: int = 224,
+         batch_norm_eps: float = 1e-05,
+         **kwargs,
+     ) -> None:
+         super().__init__(**kwargs)
+
+         self.hidden_act = hidden_act
+         self.hidden_dropout_prob = hidden_dropout_prob
+         self.hidden_sizes = hidden_sizes
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.initializer_range = initializer_range
+         self.layer_norm_eps = layer_norm_eps
+         self.patch_size = patch_size
+         self.num_channels = num_channels
+         self.depths = depths
+         self.mlp_expansion_ratio = mlp_expansion_ratio
+         self.downsamples = downsamples
+         self.dim = dim
+         self.key_dim = key_dim
+         self.attention_ratio = attention_ratio
+         self.resolution = resolution
+         self.pool_size = pool_size
+         self.downsample_patch_size = downsample_patch_size
+         self.downsample_stride = downsample_stride
+         self.downsample_pad = downsample_pad
+         self.drop_path_rate = drop_path_rate
+         self.num_meta3d_blocks = num_meta3d_blocks
+         self.distillation = distillation
+         self.use_layer_scale = use_layer_scale
+         self.layer_scale_init_value = layer_scale_init_value
+         self.image_size = image_size
+         self.batch_norm_eps = batch_norm_eps
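The configuration above is a plain container for the stage-level hyperparameters, so a variant architecture can be described by overriding them at construction time. A small sketch (the override values below are illustrative only, not the settings of any released checkpoint):

```python
from transformers import EfficientFormerConfig

# Default (efficientformer-l1 style) configuration.
config = EfficientFormerConfig()
print(config.depths, config.hidden_sizes)  # [3, 2, 6, 4] [48, 96, 224, 448]

# A hypothetical wider/deeper variant; numbers are made up for illustration.
custom = EfficientFormerConfig(
    depths=[4, 4, 12, 6],
    hidden_sizes=[64, 128, 320, 512],
    dim=512,               # kept equal to the last hidden size, mirroring the defaults (448/448)
    num_meta3d_blocks=2,   # more attention blocks in the last stage
)
```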
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,252 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Convert EfficientFormer checkpoints from the original repository.
+
+ URL: https://github.com/snap-research/EfficientFormer
+ """
+
+ import argparse
+ import re
+ from pathlib import Path
+
+ import requests
+ import torch
+ from PIL import Image
+ from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
+
+ from transformers import (
+     EfficientFormerConfig,
+     EfficientFormerForImageClassificationWithTeacher,
+     EfficientFormerImageProcessor,
+ )
+ from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
+
+
+ def rename_key(old_name, num_meta4D_last_stage):
+     new_name = old_name
+
+     if "patch_embed" in old_name:
+         _, layer, param = old_name.split(".")
+
+         if layer == "0":
+             new_name = old_name.replace("0", "convolution1")
+         elif layer == "1":
+             new_name = old_name.replace("1", "batchnorm_before")
+         elif layer == "3":
+             new_name = old_name.replace("3", "convolution2")
+         else:
+             new_name = old_name.replace("4", "batchnorm_after")
+
+     if "network" in old_name and re.search(r"\d\.\d", old_name):
+         two_digit_num = r"\b\d{2}\b"
+         if bool(re.search(two_digit_num, old_name)):
+             match = re.search(r"\d\.\d\d.", old_name).group()
+         else:
+             match = re.search(r"\d\.\d.", old_name).group()
+         if int(match[0]) < 6:
+             trimmed_name = old_name.replace(match, "")
+             trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
+             new_name = "intermediate_stages." + trimmed_name
+         else:
+             trimmed_name = old_name.replace(match, "")
+             if int(match[2]) < num_meta4D_last_stage:
+                 trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
+             else:
+                 layer_index = str(int(match[2]) - num_meta4D_last_stage)
+                 trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
+                 if "norm1" in old_name:
+                     trimmed_name = trimmed_name.replace("norm1", "layernorm1")
+                 elif "norm2" in old_name:
+                     trimmed_name = trimmed_name.replace("norm2", "layernorm2")
+                 elif "fc1" in old_name:
+                     trimmed_name = trimmed_name.replace("fc1", "linear_in")
+                 elif "fc2" in old_name:
+                     trimmed_name = trimmed_name.replace("fc2", "linear_out")
+
+             new_name = "last_stage." + trimmed_name
+
+     elif "network" in old_name and re.search(r".\d.", old_name):
+         new_name = old_name.replace("network", "intermediate_stages")
+
+     if "fc" in new_name:
+         new_name = new_name.replace("fc", "convolution")
+     elif ("norm1" in new_name) and ("layernorm1" not in new_name):
+         new_name = new_name.replace("norm1", "batchnorm_before")
+     elif ("norm2" in new_name) and ("layernorm2" not in new_name):
+         new_name = new_name.replace("norm2", "batchnorm_after")
+     if "proj" in new_name:
+         new_name = new_name.replace("proj", "projection")
+     if "dist_head" in new_name:
+         new_name = new_name.replace("dist_head", "distillation_classifier")
+     elif "head" in new_name:
+         new_name = new_name.replace("head", "classifier")
+     elif "patch_embed" in new_name:
+         new_name = "efficientformer." + new_name
+     elif new_name == "norm.weight" or new_name == "norm.bias":
+         new_name = new_name.replace("norm", "layernorm")
+         new_name = "efficientformer." + new_name
+     else:
+         new_name = "efficientformer.encoder." + new_name
+
+     return new_name
+
+
+ def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
+     for key in checkpoint.copy().keys():
+         val = checkpoint.pop(key)
+         checkpoint[rename_key(key, num_meta4D_last_stage)] = val
+
+     return checkpoint
+
+
+ # We will verify our results on a COCO image
+ def prepare_img():
+     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+     image = Image.open(requests.get(url, stream=True).raw)
+
+     return image
+
+
+ def convert_efficientformer_checkpoint(
+     checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
+ ):
+     orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
+     config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
+     model = EfficientFormerForImageClassificationWithTeacher(config)
+     model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])
+
+     num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
+     new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
+
+     model.load_state_dict(new_state_dict)
+     model.eval()
+
+     pillow_resamplings = {
+         "bilinear": PILImageResampling.BILINEAR,
+         "bicubic": PILImageResampling.BICUBIC,
+         "nearest": PILImageResampling.NEAREST,
+     }
+
+     # prepare image
+     image = prepare_img()
+     image_size = 256
+     crop_size = 224
+     processor = EfficientFormerImageProcessor(
+         size={"shortest_edge": image_size},
+         crop_size={"height": crop_size, "width": crop_size},
+         resample=pillow_resamplings["bicubic"],
+     )
+     pixel_values = processor(images=image, return_tensors="pt").pixel_values
+
+     # original processing pipeline
+     image_transforms = Compose(
+         [
+             Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
+             CenterCrop(crop_size),
+             ToTensor(),
+             Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
+         ]
+     )
+     original_pixel_values = image_transforms(image).unsqueeze(0)
+
+     assert torch.allclose(original_pixel_values, pixel_values)
+
+     outputs = model(pixel_values)
+     logits = outputs.logits
+
+     expected_shape = (1, 1000)
+
+     if "l1" in model_name:
+         expected_logits = torch.Tensor(
+             [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
+         )
+         assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
+         assert logits.shape == expected_shape
+     elif "l3" in model_name:
+         expected_logits = torch.Tensor(
+             [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
+         )
+         assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
+         assert logits.shape == expected_shape
+     elif "l7" in model_name:
+         expected_logits = torch.Tensor(
+             [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
+         )
+         assert logits.shape == expected_shape
+     else:
+         raise ValueError(
+             f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
+         )
+
+     # Save Checkpoints
+     Path(pytorch_dump_path).mkdir(exist_ok=True)
+     model.save_pretrained(pytorch_dump_path)
+     print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")
+     processor.save_pretrained(pytorch_dump_path)
+     print(f"Processor successfuly saved at {pytorch_dump_path}")
+
+     if push_to_hub:
+         print("Pushing model to the hub...")
+
+         model.push_to_hub(
+             repo_id=f"Bearnardd/{pytorch_dump_path}",
+             commit_message="Add model",
+             use_temp_dir=True,
+         )
+         processor.push_to_hub(
+             repo_id=f"Bearnardd/{pytorch_dump_path}",
+             commit_message="Add image processor",
+             use_temp_dir=True,
+         )
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     # Required parameters
+     parser.add_argument(
+         "--pytorch_model_path",
+         default=None,
+         type=str,
+         required=True,
+         help="Path to EfficientFormer pytorch checkpoint.",
+     )
+     parser.add_argument(
+         "--config_file",
+         default=None,
+         type=str,
+         required=True,
+         help="The json file for EfficientFormer model config.",
+     )
+     parser.add_argument(
+         "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+     )
+
+     parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
+     parser.add_argument(
+         "--no-push_to_hub",
+         dest="push_to_hub",
+         action="store_false",
+         help="Do not push model and image processor to the hub",
+     )
+     parser.set_defaults(push_to_hub=True)
+
+     args = parser.parse_args()
+     convert_efficientformer_checkpoint(
+         checkpoint_path=args.pytorch_model_path,
+         efficientformer_config_file=args.config_file,
+         pytorch_dump_path=args.pytorch_dump_path,
+         push_to_hub=args.push_to_hub,
+     )
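The conversion entry point can also be invoked directly from Python instead of the CLI; a sketch with placeholder file names (the paths below are hypothetical, standing in for an original snap-research checkpoint and its matching JSON config):

```python
# Equivalent to running the script with --pytorch_model_path / --config_file /
# --pytorch_dump_path and --no-push_to_hub; file names are placeholders.
from transformers.models.efficientformer.convert_efficientformer_original_pytorch_checkpoint_to_pytorch import (
    convert_efficientformer_checkpoint,
)

convert_efficientformer_checkpoint(
    checkpoint_path="efficientformer_l1_1000d.pth",        # placeholder original checkpoint
    efficientformer_config_file="efficientformer_l1.json",  # placeholder config JSON
    pytorch_dump_path="efficientformer-l1-converted",
    push_to_hub=False,
)
```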
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/image_processing_efficientformer.py ADDED
@@ -0,0 +1,321 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Image processor class for EfficientFormer."""
+
+ from typing import Dict, List, Optional, Union
+
+ import numpy as np
+
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+ from ...image_transforms import (
+     get_resize_output_image_size,
+     resize,
+     to_channel_dimension_format,
+ )
+ from ...image_utils import (
+     IMAGENET_DEFAULT_MEAN,
+     IMAGENET_DEFAULT_STD,
+     ChannelDimension,
+     ImageInput,
+     PILImageResampling,
+     infer_channel_dimension_format,
+     is_batched,
+     is_scaled_image,
+     to_numpy_array,
+     valid_images,
+     validate_kwargs,
+     validate_preprocess_arguments,
+ )
+ from ...utils import TensorType, logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class EfficientFormerImageProcessor(BaseImageProcessor):
+     r"""
+     Constructs a EfficientFormer image processor.
+
+     Args:
+         do_resize (`bool`, *optional*, defaults to `True`):
+             Whether to resize the image's (height, width) dimensions to the specified `(size["height"],
+             size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method.
+         size (`dict`, *optional*, defaults to `{"height": 224, "width": 224}`):
+             Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
+             method.
+         resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+             Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
+             `preprocess` method.
+         do_center_crop (`bool`, *optional*, defaults to `True`):
+             Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
+             `preprocess` method.
+         crop_size (`Dict[str, int]` *optional*, defaults to 224):
+             Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
+             method.
+         do_rescale (`bool`, *optional*, defaults to `True`):
+             Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+             parameter in the `preprocess` method.
+         rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+             Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
+             `preprocess` method.
+         do_normalize:
+             Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+             method.
+         image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+             Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+             channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+         image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+             Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+             number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+     """
+
+     model_input_names = ["pixel_values"]
+
+     def __init__(
+         self,
+         do_resize: bool = True,
+         size: Optional[Dict[str, int]] = None,
+         resample: PILImageResampling = PILImageResampling.BICUBIC,
+         do_center_crop: bool = True,
+         do_rescale: bool = True,
+         rescale_factor: Union[int, float] = 1 / 255,
+         crop_size: Dict[str, int] = None,
+         do_normalize: bool = True,
+         image_mean: Optional[Union[float, List[float]]] = None,
+         image_std: Optional[Union[float, List[float]]] = None,
+         **kwargs,
+     ) -> None:
+         super().__init__(**kwargs)
+         size = size if size is not None else {"height": 224, "width": 224}
+         size = get_size_dict(size)
+         crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
+         crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
+
+         self.do_resize = do_resize
+         self.do_rescale = do_rescale
+         self.do_normalize = do_normalize
+         self.do_center_crop = do_center_crop
+         self.crop_size = crop_size
+         self.size = size
+         self.resample = resample
+         self.rescale_factor = rescale_factor
+         self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
+         self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
+         self._valid_processor_keys = [
+             "images",
+             "do_resize",
+             "size",
+             "resample",
+             "do_center_crop",
+             "crop_size",
+             "do_rescale",
+             "rescale_factor",
+             "do_normalize",
+             "image_mean",
+             "image_std",
+             "return_tensors",
+             "data_format",
+             "input_data_format",
+         ]
+
+     def resize(
+         self,
+         image: np.ndarray,
+         size: Dict[str, int],
+         resample: PILImageResampling = PILImageResampling.BILINEAR,
+         data_format: Optional[Union[str, ChannelDimension]] = None,
+         input_data_format: Optional[Union[str, ChannelDimension]] = None,
+         **kwargs,
+     ) -> np.ndarray:
+         """
+         Resize an image to `(size["height"], size["width"])`.
+
+         Args:
+             image (`np.ndarray`):
+                 Image to resize.
+             size (`Dict[str, int]`):
+                 Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
+             resample:
+                 `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
+             data_format (`ChannelDimension` or `str`, *optional*):
+                 The channel dimension format for the output image. If unset, the channel dimension format of the input
+                 image is used. Can be one of:
+                 - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                 - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+             input_data_format (`ChannelDimension` or `str`, *optional*):
+                 The channel dimension format of the input image. If not provided, it will be inferred.
+
+         Returns:
+             `np.ndarray`: The resized image.
+         """
+         size = get_size_dict(size)
+
+         if "shortest_edge" in size:
+             size = get_resize_output_image_size(
+                 image, size=size["shortest_edge"], default_to_square=False, input_data_format=input_data_format
+             )
+             # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
+         elif "height" in size and "width" in size:
+             size = (size["height"], size["width"])
+         else:
+             raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
+         return resize(
+             image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs
+         )
+
+     def preprocess(
+         self,
+         images: ImageInput,
+         do_resize: Optional[bool] = None,
+         size: Dict[str, int] = None,
+         resample: PILImageResampling = None,
+         do_center_crop: bool = None,
+         crop_size: int = None,
+         do_rescale: Optional[bool] = None,
+         rescale_factor: Optional[float] = None,
+         do_normalize: Optional[bool] = None,
+         image_mean: Optional[Union[float, List[float]]] = None,
+         image_std: Optional[Union[float, List[float]]] = None,
+         return_tensors: Optional[Union[str, TensorType]] = None,
+         data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
+         input_data_format: Optional[Union[str, ChannelDimension]] = None,
+         **kwargs,
+     ) -> BatchFeature:
+         """
+         Preprocess an image or batch of images.
+
+         Args:
+             images (`ImageInput`):
+                 Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+                 passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+             do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+                 Whether to resize the image.
+             size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+                 Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
+                 resizing.
+             resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
+                 `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
+                 an effect if `do_resize` is set to `True`.
+             do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
+                 Whether to center crop the image.
+             do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+                 Whether to rescale the image values between [0 - 1].
+             rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+                 Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+             crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
+                 Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
+             do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+                 Whether to normalize the image.
+             image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+                 Image mean to use if `do_normalize` is set to `True`.
+             image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+                 Image standard deviation to use if `do_normalize` is set to `True`.
+             return_tensors (`str` or `TensorType`, *optional*):
+                 The type of tensors to return. Can be one of:
+                 - Unset: Return a list of `np.ndarray`.
+                 - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+                 - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+                 - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+                 - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+             data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+                 The channel dimension format for the output image. Can be one of:
+                 - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                 - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                 - Unset: Use the channel dimension format of the input image.
+             input_data_format (`ChannelDimension` or `str`, *optional*):
+                 The channel dimension format for the input image. If unset, the channel dimension format is inferred
+                 from the input image. Can be one of:
+                 - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                 - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                 - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+         """
+         do_resize = do_resize if do_resize is not None else self.do_resize
+         do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+         do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+         do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
+         crop_size = crop_size if crop_size is not None else self.crop_size
+         crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
+         resample = resample if resample is not None else self.resample
+         rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+         image_mean = image_mean if image_mean is not None else self.image_mean
+         image_std = image_std if image_std is not None else self.image_std
+
+         size = size if size is not None else self.size
+         size_dict = get_size_dict(size)
+
+         validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+         if not is_batched(images):
+             images = [images]
+
+         if not valid_images(images):
+             raise ValueError(
+                 "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+                 "torch.Tensor, tf.Tensor or jax.ndarray."
+             )
+         validate_preprocess_arguments(
+             do_rescale=do_rescale,
+             rescale_factor=rescale_factor,
+             do_normalize=do_normalize,
+             image_mean=image_mean,
+             image_std=image_std,
+             do_center_crop=do_center_crop,
+             crop_size=crop_size,
+             do_resize=do_resize,
+             size=size,
+             resample=resample,
+         )
+         # All transformations expect numpy arrays.
+         images = [to_numpy_array(image) for image in images]
+
+         if is_scaled_image(images[0]) and do_rescale:
+             logger.warning_once(
+                 "It looks like you are trying to rescale already rescaled images. If the input"
+                 " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+             )
+
+         if input_data_format is None:
+             # We assume that all images have the same channel dimension format.
+             input_data_format = infer_channel_dimension_format(images[0])
+
+         if do_resize:
+             images = [
+                 self.resize(image=image, size=size_dict, resample=resample, input_data_format=input_data_format)
+                 for image in images
+             ]
+
+         if do_center_crop:
+             images = [
+                 self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
+             ]
+
+         if do_rescale:
+             images = [
+                 self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+                 for image in images
+             ]
+
+         if do_normalize:
+             images = [
+                 self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+                 for image in images
+             ]
+
+         images = [
+             to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+         ]
+
+         data = {"pixel_values": images}
+         return BatchFeature(data=data, tensor_type=return_tensors)
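A short usage sketch of the processor defined above, mirroring the resize-then-center-crop settings used in the conversion script (assumes Pillow is installed; the image path is a placeholder):

```python
from PIL import Image
from transformers import EfficientFormerImageProcessor

image = Image.open("cat.jpg")  # placeholder local image

# Resize the shortest edge to 256, then center-crop to 224x224, rescale and normalize.
processor = EfficientFormerImageProcessor(
    size={"shortest_edge": 256},
    crop_size={"height": 224, "width": 224},
)
inputs = processor(images=image, return_tensors="pt")
print(inputs.pixel_values.shape)  # torch.Size([1, 3, 224, 224])
```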
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/modeling_efficientformer.py ADDED
@@ -0,0 +1,803 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Snapchat Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch EfficientFormer model."""
16
+
17
+ import itertools
18
+ from dataclasses import dataclass
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
28
+ from ...modeling_utils import PreTrainedModel
29
+ from ...utils import (
30
+ ModelOutput,
31
+ add_code_sample_docstrings,
32
+ add_start_docstrings,
33
+ add_start_docstrings_to_model_forward,
34
+ logging,
35
+ )
36
+ from .configuration_efficientformer import EfficientFormerConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+ # General docstring
42
+ _CONFIG_FOR_DOC = "EfficientFormerConfig"
43
+
44
+ # Base docstring
45
+ _CHECKPOINT_FOR_DOC = "snap-research/efficientformer-l1-300"
46
+ _EXPECTED_OUTPUT_SHAPE = [1, 49, 448]
47
+
48
+ # Image classification docstring
49
+ _IMAGE_CLASS_CHECKPOINT = "snap-research/efficientformer-l1-300"
50
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "Egyptian cat"
51
+
52
+
53
+ from ..deprecated._archive_maps import EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
54
+
55
+
56
+ class EfficientFormerPatchEmbeddings(nn.Module):
57
+ """
58
+ This class performs downsampling between two stages. For the input tensor with the shape [batch_size, num_channels,
59
+ height, width] it produces output tensor with the shape [batch_size, num_channels, height/stride, width/stride]
60
+ """
61
+
62
+ def __init__(self, config: EfficientFormerConfig, num_channels: int, embed_dim: int, apply_norm: bool = True):
63
+ super().__init__()
64
+ self.num_channels = num_channels
65
+
66
+ self.projection = nn.Conv2d(
67
+ num_channels,
68
+ embed_dim,
69
+ kernel_size=config.downsample_patch_size,
70
+ stride=config.downsample_stride,
71
+ padding=config.downsample_pad,
72
+ )
73
+ self.norm = nn.BatchNorm2d(embed_dim, eps=config.batch_norm_eps) if apply_norm else nn.Identity()
74
+
75
+ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
76
+ batch_size, num_channels, height, width = pixel_values.shape
77
+ if num_channels != self.num_channels:
78
+ raise ValueError(
79
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
80
+ )
81
+
82
+ embeddings = self.projection(pixel_values)
83
+ embeddings = self.norm(embeddings)
84
+
85
+ return embeddings
86
+
87
+
88
+ class EfficientFormerSelfAttention(nn.Module):
89
+ def __init__(self, dim: int, key_dim: int, num_heads: int, attention_ratio: int, resolution: int):
90
+ super().__init__()
91
+
92
+ self.num_heads = num_heads
93
+ self.key_dim = key_dim
94
+ self.attention_ratio = attention_ratio
95
+ self.scale = key_dim**-0.5
96
+ self.total_key_dim = key_dim * num_heads
97
+ self.expanded_key_dim = int(attention_ratio * key_dim)
98
+ self.total_expanded_key_dim = int(self.expanded_key_dim * num_heads)
99
+ hidden_size = self.total_expanded_key_dim + self.total_key_dim * 2
100
+ self.qkv = nn.Linear(dim, hidden_size)
101
+ self.projection = nn.Linear(self.total_expanded_key_dim, dim)
102
+ points = list(itertools.product(range(resolution), range(resolution)))
103
+ num_points = len(points)
104
+ attention_offsets = {}
105
+ idxs = []
106
+ for point_1 in points:
107
+ for point_2 in points:
108
+ offset = (abs(point_1[0] - point_2[0]), abs(point_1[1] - point_2[1]))
109
+ if offset not in attention_offsets:
110
+ attention_offsets[offset] = len(attention_offsets)
111
+ idxs.append(attention_offsets[offset])
112
+ self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets)))
113
+ self.register_buffer("attention_bias_idxs", torch.LongTensor(idxs).view(num_points, num_points))
114
+
115
+ @torch.no_grad()
116
+ def train(self, mode=True):
117
+ super().train(mode)
118
+ if mode and hasattr(self, "ab"):
119
+ del self.ab
120
+ else:
121
+ self.ab = self.attention_biases[:, self.attention_bias_idxs]
122
+
123
+ def forward(self, hidden_states: torch.Tensor, output_attentions: bool = False) -> Tuple[torch.Tensor]:
124
+ batch_size, sequence_length, num_channels = hidden_states.shape
125
+ qkv = self.qkv(hidden_states)
126
+ query_layer, key_layer, value_layer = qkv.reshape(batch_size, sequence_length, self.num_heads, -1).split(
127
+ [self.key_dim, self.key_dim, self.expanded_key_dim], dim=3
128
+ )
129
+ query_layer = query_layer.permute(0, 2, 1, 3)
130
+ key_layer = key_layer.permute(0, 2, 1, 3)
131
+ value_layer = value_layer.permute(0, 2, 1, 3)
132
+
133
+ # set `model.to(torch_device)` won't change `self.ab.device`, if there is no follow-up `train` or `eval` call.
134
+ # Let's do it manually here, so users won't have to do this everytime.
135
+ if not self.training:
136
+ self.ab = self.ab.to(self.attention_biases.device)
137
+ attention_probs = (torch.matmul(query_layer, key_layer.transpose(-2, -1))) * self.scale + (
138
+ self.attention_biases[:, self.attention_bias_idxs] if self.training else self.ab
139
+ )
140
+
141
+ attention_probs = attention_probs.softmax(dim=-1)
142
+
143
+ context_layer = torch.matmul(attention_probs, value_layer).transpose(1, 2)
144
+ context_layer = context_layer.reshape(batch_size, sequence_length, self.total_expanded_key_dim)
145
+ context_layer = self.projection(context_layer)
146
+
147
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
148
+
149
+ return outputs
150
+
151
+
152
+ class EfficientFormerConvStem(nn.Module):
153
+ def __init__(self, config: EfficientFormerConfig, out_channels: int):
154
+ super().__init__()
155
+
156
+ self.convolution1 = nn.Conv2d(config.num_channels, out_channels // 2, kernel_size=3, stride=2, padding=1)
157
+ self.batchnorm_before = nn.BatchNorm2d(out_channels // 2, eps=config.batch_norm_eps)
158
+
159
+ self.convolution2 = nn.Conv2d(out_channels // 2, out_channels, kernel_size=3, stride=2, padding=1)
160
+ self.batchnorm_after = nn.BatchNorm2d(out_channels, eps=config.batch_norm_eps)
161
+
162
+ self.activation = nn.ReLU()
163
+
164
+ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
165
+ features = self.batchnorm_before(self.convolution1(pixel_values))
166
+ features = self.activation(features)
167
+ features = self.batchnorm_after(self.convolution2(features))
168
+ features = self.activation(features)
169
+
170
+ return features
171
+
172
+
173
+ class EfficientFormerPooling(nn.Module):
174
+ def __init__(self, pool_size: int):
175
+ super().__init__()
176
+ self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)
177
+
178
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
179
+ output = self.pool(hidden_states) - hidden_states
180
+ return output
181
+
182
+
183
+ class EfficientFormerDenseMlp(nn.Module):
184
+ def __init__(
185
+ self,
186
+ config: EfficientFormerConfig,
187
+ in_features: int,
188
+ hidden_features: Optional[int] = None,
189
+ out_features: Optional[int] = None,
190
+ ):
191
+ super().__init__()
192
+ out_features = out_features or in_features
193
+ hidden_features = hidden_features or in_features
194
+
195
+ self.linear_in = nn.Linear(in_features, hidden_features)
196
+ self.activation = ACT2FN[config.hidden_act]
197
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
198
+ self.linear_out = nn.Linear(hidden_features, out_features)
199
+
200
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
201
+ hidden_states = self.linear_in(hidden_states)
202
+ hidden_states = self.activation(hidden_states)
203
+ hidden_states = self.dropout(hidden_states)
204
+ hidden_states = self.linear_out(hidden_states)
205
+ hidden_states = self.dropout(hidden_states)
206
+
207
+ return hidden_states
208
+
209
+
210
+ class EfficientFormerConvMlp(nn.Module):
211
+ def __init__(
212
+ self,
213
+ config: EfficientFormerConfig,
214
+ in_features: int,
215
+ hidden_features: Optional[int] = None,
216
+ out_features: Optional[int] = None,
217
+ drop: float = 0.0,
218
+ ):
219
+ super().__init__()
220
+ out_features = out_features or in_features
221
+ hidden_features = hidden_features or in_features
222
+
223
+ self.convolution1 = nn.Conv2d(in_features, hidden_features, 1)
224
+ self.activation = ACT2FN[config.hidden_act]
225
+ self.convolution2 = nn.Conv2d(hidden_features, out_features, 1)
226
+ self.dropout = nn.Dropout(drop)
227
+
228
+ self.batchnorm_before = nn.BatchNorm2d(hidden_features, eps=config.batch_norm_eps)
229
+ self.batchnorm_after = nn.BatchNorm2d(out_features, eps=config.batch_norm_eps)
230
+
231
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
232
+ hidden_state = self.convolution1(hidden_state)
233
+ hidden_state = self.batchnorm_before(hidden_state)
234
+
235
+ hidden_state = self.activation(hidden_state)
236
+ hidden_state = self.dropout(hidden_state)
237
+ hidden_state = self.convolution2(hidden_state)
238
+
239
+ hidden_state = self.batchnorm_after(hidden_state)
240
+ hidden_state = self.dropout(hidden_state)
241
+
242
+ return hidden_state
243
+
244
+
245
+ # Copied from transformers.models.convnext.modeling_convnext.drop_path
246
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
247
+ """
248
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
249
+
250
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
251
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
252
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
253
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
254
+ argument.
255
+ """
256
+ if drop_prob == 0.0 or not training:
257
+ return input
258
+ keep_prob = 1 - drop_prob
259
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
260
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
261
+ random_tensor.floor_() # binarize
262
+ output = input.div(keep_prob) * random_tensor
263
+ return output
264
+
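A small numeric illustration of the stochastic-depth behaviour above (a sketch; the drop probability of 0.2 is an arbitrary choice, and the snippet assumes the `drop_path` function defined above is in scope): during training each sample's residual branch is either zeroed or rescaled by `1 / keep_prob`, so the expectation is preserved, while in eval mode the input passes through unchanged.

import torch

x = torch.ones(4, 3, 2)
out_train = drop_path(x, drop_prob=0.2, training=True)
print(sorted(out_train.unique().tolist()))  # a subset of [0.0, 1.25], since 1 / keep_prob = 1 / 0.8
print(torch.equal(drop_path(x, drop_prob=0.2, training=False), x))  # True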
265
+
266
+ # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->EfficientFormer
267
+ class EfficientFormerDropPath(nn.Module):
268
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
269
+
270
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
271
+ super().__init__()
272
+ self.drop_prob = drop_prob
273
+
274
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
275
+ return drop_path(hidden_states, self.drop_prob, self.training)
276
+
277
+ def extra_repr(self) -> str:
278
+ return "p={}".format(self.drop_prob)
279
+
280
+
281
+ class EfficientFormerFlat(nn.Module):
282
+ def __init__(self):
283
+ super().__init__()
284
+
285
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
286
+ hidden_states = hidden_states.flatten(2).transpose(1, 2)
287
+ return hidden_states
288
+
289
+
290
+ class EfficientFormerMeta3D(nn.Module):
291
+ def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float = 0.0):
292
+ super().__init__()
293
+
294
+ self.token_mixer = EfficientFormerSelfAttention(
295
+ dim=config.dim,
296
+ key_dim=config.key_dim,
297
+ num_heads=config.num_attention_heads,
298
+ attention_ratio=config.attention_ratio,
299
+ resolution=config.resolution,
300
+ )
301
+
302
+ self.layernorm1 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
303
+ self.layernorm2 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
304
+
305
+ mlp_hidden_dim = int(dim * config.mlp_expansion_ratio)
306
+ self.mlp = EfficientFormerDenseMlp(config, in_features=dim, hidden_features=mlp_hidden_dim)
307
+
308
+ self.drop_path = EfficientFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
309
+ self.use_layer_scale = config.use_layer_scale
310
+ if config.use_layer_scale:
311
+ self.layer_scale_1 = nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
312
+ self.layer_scale_2 = nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
313
+
314
+ def forward(self, hidden_states: torch.Tensor, output_attentions: bool = False) -> Tuple[torch.Tensor]:
315
+ self_attention_outputs = self.token_mixer(self.layernorm1(hidden_states), output_attentions)
316
+ attention_output = self_attention_outputs[0]
317
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
318
+
319
+ if self.use_layer_scale:
320
+ layer_output = hidden_states + self.drop_path(
321
+ self.layer_scale_1.unsqueeze(0).unsqueeze(0) * attention_output
322
+ )
323
+ layer_output = layer_output + self.drop_path(
324
+ self.layer_scale_2.unsqueeze(0).unsqueeze(0) * self.mlp(self.layernorm2(layer_output))
325
+ )
326
+ else:
327
+ layer_output = hidden_states + self.drop_path(attention_output)
328
+ layer_output = layer_output + self.drop_path(self.mlp(self.layernorm2(layer_output)))
329
+
330
+ outputs = (layer_output,) + outputs
331
+
332
+ return outputs
333
+
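A quick note on the layer-scale broadcasting above (a sketch): `layer_scale_1` has shape `(dim,)`, and the two `unsqueeze(0)` calls turn it into `(1, 1, dim)` so that it scales every token channel-wise before the residual addition. The 448-dimensional last stage and the 1e-5 initial value below are illustrative assumptions.

import torch

dim = 448                                    # last-stage width, for illustration
layer_scale = 1e-5 * torch.ones(dim)         # layer_scale_init_value-style initialisation
attention_output = torch.randn(2, 49, dim)   # (batch, tokens, dim)
scaled = layer_scale.unsqueeze(0).unsqueeze(0) * attention_output
print(scaled.shape)                          # torch.Size([2, 49, 448])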
334
+
335
+ class EfficientFormerMeta3DLayers(nn.Module):
336
+ def __init__(self, config: EfficientFormerConfig):
337
+ super().__init__()
338
+ drop_paths = [
339
+ config.drop_path_rate * (block_idx + sum(config.depths[:-1]))
340
+ for block_idx in range(config.num_meta3d_blocks)
341
+ ]
342
+ self.blocks = nn.ModuleList(
343
+ [EfficientFormerMeta3D(config, config.hidden_sizes[-1], drop_path=drop_path) for drop_path in drop_paths]
344
+ )
345
+
346
+ def forward(self, hidden_states: torch.Tensor, output_attentions: bool = False) -> Tuple[torch.Tensor]:
347
+ all_attention_outputs = () if output_attentions else None
348
+
349
+ for layer_module in self.blocks:
350
+ if isinstance(hidden_states, tuple):
351
+ hidden_states = hidden_states[0]
352
+
353
+ hidden_states = layer_module(hidden_states, output_attentions)
354
+
355
+ if output_attentions:
356
+ all_attention_outputs = all_attention_outputs + (hidden_states[1],)
357
+
358
+ if output_attentions:
359
+ outputs = (hidden_states[0],) + all_attention_outputs
360
+ return outputs
361
+
362
+ return hidden_states
363
+
364
+
365
+ class EfficientFormerMeta4D(nn.Module):
366
+ def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float = 0.0):
367
+ super().__init__()
368
+ pool_size = config.pool_size if config.pool_size is not None else 3
369
+ self.token_mixer = EfficientFormerPooling(pool_size=pool_size)
370
+ mlp_hidden_dim = int(dim * config.mlp_expansion_ratio)
371
+ self.mlp = EfficientFormerConvMlp(
372
+ config, in_features=dim, hidden_features=mlp_hidden_dim, drop=config.hidden_dropout_prob
373
+ )
374
+
375
+ self.drop_path = EfficientFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
376
+ self.use_layer_scale = config.use_layer_scale
377
+ if config.use_layer_scale:
378
+ self.layer_scale_1 = nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
379
+ self.layer_scale_2 = nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
380
+
381
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
382
+ outputs = self.token_mixer(hidden_states)
383
+
384
+ if self.use_layer_scale:
385
+ layer_output = hidden_states + self.drop_path(self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * outputs)
386
+
387
+ layer_output = layer_output + self.drop_path(
388
+ self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * self.mlp(layer_output)
389
+ )
390
+ else:
391
+ layer_output = hidden_states + self.drop_path(outputs)
392
+ layer_output = layer_output + self.drop_path(self.mlp(layer_output))
393
+
394
+ return layer_output
395
+
396
+
397
+ class EfficientFormerMeta4DLayers(nn.Module):
398
+ def __init__(self, config: EfficientFormerConfig, stage_idx: int):
399
+ super().__init__()
400
+ num_layers = (
401
+ config.depths[stage_idx] if stage_idx != -1 else config.depths[stage_idx] - config.num_meta3d_blocks
402
+ )
403
+ drop_paths = [
404
+ config.drop_path_rate * (block_idx + sum(config.depths[:stage_idx])) for block_idx in range(num_layers)
405
+ ]
406
+
407
+ self.blocks = nn.ModuleList(
408
+ [
409
+ EfficientFormerMeta4D(config, config.hidden_sizes[stage_idx], drop_path=drop_path)
410
+ for drop_path in drop_paths
411
+ ]
412
+ )
413
+
414
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
415
+ for layer_module in self.blocks:
416
+ hidden_states = layer_module(hidden_states)
417
+ return hidden_states
418
+
419
+
420
+ class EfficientFormerIntermediateStage(nn.Module):
421
+ def __init__(self, config: EfficientFormerConfig, index: int):
422
+ super().__init__()
423
+ self.meta4D_layers = EfficientFormerMeta4DLayers(config, index)
424
+
425
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
426
+ hidden_states = self.meta4D_layers(hidden_states)
427
+ return hidden_states
428
+
429
+
430
+ class EfficientFormerLastStage(nn.Module):
431
+ def __init__(self, config: EfficientFormerConfig):
432
+ super().__init__()
433
+ self.meta4D_layers = EfficientFormerMeta4DLayers(config, -1)
434
+ self.flat = EfficientFormerFlat()
435
+ self.meta3D_layers = EfficientFormerMeta3DLayers(config)
436
+
437
+ def forward(self, hidden_states: torch.Tensor, output_attentions: bool = False) -> Tuple[torch.Tensor]:
438
+ hidden_states = self.meta4D_layers(hidden_states)
439
+ hidden_states = self.flat(hidden_states)
440
+ hidden_states = self.meta3D_layers(hidden_states, output_attentions)
441
+
442
+ return hidden_states
443
+
444
+
445
+ class EfficientFormerEncoder(nn.Module):
446
+ def __init__(self, config: EfficientFormerConfig):
447
+ super().__init__()
448
+ self.config = config
449
+ num_intermediate_stages = len(config.depths) - 1
450
+ downsamples = [
451
+ config.downsamples[i] or config.hidden_sizes[i] != config.hidden_sizes[i + 1]
452
+ for i in range(num_intermediate_stages)
453
+ ]
454
+ intermediate_stages = []
455
+
456
+ for i in range(num_intermediate_stages):
457
+ intermediate_stages.append(EfficientFormerIntermediateStage(config, i))
458
+ if downsamples[i]:
459
+ intermediate_stages.append(
460
+ EfficientFormerPatchEmbeddings(config, config.hidden_sizes[i], config.hidden_sizes[i + 1])
461
+ )
462
+
463
+ self.intermediate_stages = nn.ModuleList(intermediate_stages)
464
+ self.last_stage = EfficientFormerLastStage(config)
465
+
466
+ def forward(
467
+ self,
468
+ hidden_states: torch.Tensor,
469
+ output_hidden_states: bool = False,
470
+ output_attentions: bool = False,
471
+ return_dict: bool = True,
472
+ ) -> BaseModelOutput:
473
+ all_hidden_states = () if output_hidden_states else None
474
+ all_self_attentions = () if output_attentions else None
475
+
476
+ if output_hidden_states:
477
+ all_hidden_states = all_hidden_states + (hidden_states,)
478
+
479
+ for layer_module in self.intermediate_stages:
480
+ hidden_states = layer_module(hidden_states)
481
+ if output_hidden_states:
482
+ all_hidden_states = all_hidden_states + (hidden_states,)
483
+
484
+ layer_output = self.last_stage(hidden_states, output_attentions=output_attentions)
485
+
486
+ if output_attentions:
487
+ all_self_attentions = all_self_attentions + layer_output[1:]
488
+
489
+ if output_hidden_states:
490
+ all_hidden_states = all_hidden_states + (layer_output[0],)
491
+
492
+ if not return_dict:
493
+ return tuple(v for v in [layer_output[0], all_hidden_states, all_self_attentions] if v is not None)
494
+
495
+ return BaseModelOutput(
496
+ last_hidden_state=layer_output[0],
497
+ hidden_states=all_hidden_states,
498
+ attentions=all_self_attentions,
499
+ )
500
+
501
+
502
+ class EfficientFormerPreTrainedModel(PreTrainedModel):
503
+ """
504
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
505
+ models.
506
+ """
507
+
508
+ config_class = EfficientFormerConfig
509
+ base_model_prefix = "efficientformer"
510
+ main_input_name = "pixel_values"
511
+ supports_gradient_checkpointing = False
512
+
513
+ def _init_weights(self, module: nn.Module):
514
+ """Initialize the weights"""
515
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
516
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
517
+ if module.bias is not None:
518
+ module.bias.data.zero_()
519
+ elif isinstance(module, nn.LayerNorm):
520
+ module.bias.data.zero_()
521
+ module.weight.data.fill_(1.0)
522
+
523
+
524
+ EFFICIENTFORMER_START_DOCSTRING = r"""
525
+ This model is a PyTorch [nn.Module](https://pytorch.org/docs/stable/nn.html#nn.Module) subclass. Use it as a
526
+ regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.
527
+
528
+ Parameters:
529
+ config ([`EfficientFormerConfig`]): Model configuration class with all the parameters of the model.
530
+ Initializing with a config file does not load the weights associated with the model, only the
531
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
532
+ """
533
+
534
+ EFFICIENTFORMER_INPUTS_DOCSTRING = r"""
535
+ Args:
536
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
537
+ Pixel values. Pixel values can be obtained using [`ViTImageProcessor`]. See
538
+ [`ViTImageProcessor.preprocess`] for details.
539
+ output_attentions (`bool`, *optional*):
540
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
541
+ tensors for more detail.
542
+ output_hidden_states (`bool`, *optional*):
543
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
544
+ more detail.
545
+ return_dict (`bool`, *optional*):
546
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
547
+ """
548
+
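A minimal end-to-end usage sketch for the inputs described above (illustrative, not upstream code; the checkpoint name and the [1, 49, 448] output shape follow the doc constants used in the companion TF file further below, and the COCO image URL is the one commonly used in the library's examples):

import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, EfficientFormerModel

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
model = EfficientFormerModel.from_pretrained("snap-research/efficientformer-l1-300")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # expected: torch.Size([1, 49, 448])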
549
+
550
+ @add_start_docstrings(
551
+ "The bare EfficientFormer Model transformer outputting raw hidden-states without any specific head on top.",
552
+ EFFICIENTFORMER_START_DOCSTRING,
553
+ )
554
+ class EfficientFormerModel(EfficientFormerPreTrainedModel):
555
+ def __init__(self, config: EfficientFormerConfig):
556
+ super().__init__(config)
557
+ self.config = config
558
+
559
+ self.patch_embed = EfficientFormerConvStem(config, config.hidden_sizes[0])
560
+ self.encoder = EfficientFormerEncoder(config)
561
+ self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
562
+
563
+ # Initialize weights and apply final processing
564
+ self.post_init()
565
+
566
+ @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
567
+ @add_code_sample_docstrings(
568
+ checkpoint=_CHECKPOINT_FOR_DOC,
569
+ output_type=BaseModelOutputWithPooling,
570
+ config_class=_CONFIG_FOR_DOC,
571
+ modality="vision",
572
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
573
+ )
574
+ def forward(
575
+ self,
576
+ pixel_values: Optional[torch.Tensor] = None,
577
+ output_attentions: Optional[bool] = None,
578
+ output_hidden_states: Optional[bool] = None,
579
+ return_dict: Optional[bool] = None,
580
+ ) -> Union[tuple, BaseModelOutput]:
581
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
582
+ output_hidden_states = (
583
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
584
+ )
585
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
586
+
587
+ if pixel_values is None:
588
+ raise ValueError("You have to specify pixel_values")
589
+
590
+ embedding_output = self.patch_embed(pixel_values)
591
+ encoder_outputs = self.encoder(
592
+ embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states
593
+ )
594
+
595
+ sequence_output = encoder_outputs[0]
596
+ sequence_output = self.layernorm(sequence_output)
597
+
598
+ if not return_dict:
599
+ head_outputs = (sequence_output,)
600
+ return head_outputs + encoder_outputs[1:]
601
+
602
+ return BaseModelOutput(
603
+ last_hidden_state=sequence_output,
604
+ hidden_states=encoder_outputs.hidden_states,
605
+ attentions=encoder_outputs.attentions,
606
+ )
607
+
608
+
609
+ @add_start_docstrings(
610
+ """
611
+ EfficientFormer Model transformer with an image classification head on top (a linear layer on top of the final
612
+ hidden state of the [CLS] token) e.g. for ImageNet.
613
+ """,
614
+ EFFICIENTFORMER_START_DOCSTRING,
615
+ )
616
+ class EfficientFormerForImageClassification(EfficientFormerPreTrainedModel):
617
+ def __init__(self, config: EfficientFormerConfig):
618
+ super().__init__(config)
619
+
620
+ self.num_labels = config.num_labels
621
+ self.efficientformer = EfficientFormerModel(config)
622
+
623
+ # Classifier head
624
+ self.classifier = (
625
+ nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
626
+ )
627
+
628
+ # Initialize weights and apply final processing
629
+ self.post_init()
630
+
631
+ @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
632
+ @add_code_sample_docstrings(
633
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
634
+ output_type=ImageClassifierOutput,
635
+ config_class=_CONFIG_FOR_DOC,
636
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
637
+ )
638
+ def forward(
639
+ self,
640
+ pixel_values: Optional[torch.Tensor] = None,
641
+ labels: Optional[torch.Tensor] = None,
642
+ output_attentions: Optional[bool] = None,
643
+ output_hidden_states: Optional[bool] = None,
644
+ return_dict: Optional[bool] = None,
645
+ ) -> Union[tuple, ImageClassifierOutput]:
646
+ r"""
647
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
648
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
649
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
650
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
651
+ """
652
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
653
+
654
+ outputs = self.efficientformer(
655
+ pixel_values,
656
+ output_attentions=output_attentions,
657
+ output_hidden_states=output_hidden_states,
658
+ return_dict=return_dict,
659
+ )
660
+
661
+ sequence_output = outputs[0]
662
+
663
+ logits = self.classifier(sequence_output.mean(-2))
664
+
665
+ loss = None
666
+ if labels is not None:
667
+ if self.config.problem_type is None:
668
+ if self.num_labels == 1:
669
+ self.config.problem_type = "regression"
670
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
671
+ self.config.problem_type = "single_label_classification"
672
+ else:
673
+ self.config.problem_type = "multi_label_classification"
674
+
675
+ if self.config.problem_type == "regression":
676
+ loss_fct = MSELoss()
677
+ if self.num_labels == 1:
678
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
679
+ else:
680
+ loss = loss_fct(logits, labels)
681
+ elif self.config.problem_type == "single_label_classification":
682
+ loss_fct = CrossEntropyLoss()
683
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
684
+ elif self.config.problem_type == "multi_label_classification":
685
+ loss_fct = BCEWithLogitsLoss()
686
+ loss = loss_fct(logits, labels)
687
+
688
+ if not return_dict:
689
+ output = (logits,) + outputs[1:]
690
+ return ((loss,) + output) if loss is not None else output
691
+
692
+ return ImageClassifierOutput(
693
+ loss=loss,
694
+ logits=logits,
695
+ hidden_states=outputs.hidden_states,
696
+ attentions=outputs.attentions,
697
+ )
698
+
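A short classification sketch for the head above (illustrative; it assumes the same checkpoint as the base-model example earlier and simply reads off the highest-scoring ImageNet label):

import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, EfficientFormerForImageClassification

image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
processor = AutoImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
model = EfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")

with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])  # the predicted ImageNet class name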
699
+
700
+ @dataclass
701
+ class EfficientFormerForImageClassificationWithTeacherOutput(ModelOutput):
702
+ """
703
+ Output type of [`EfficientFormerForImageClassificationWithTeacher`].
704
+
705
+ Args:
706
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
707
+ Prediction scores as the average of the cls_logits and distillation logits.
708
+ cls_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
709
+ Prediction scores of the classification head (i.e. the linear layer on top of the final hidden state of the
710
+ class token).
711
+ distillation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
712
+ Prediction scores of the distillation head (i.e. the linear layer on top of the final hidden state of the
713
+ distillation token).
714
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
715
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
716
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
717
+ plus the initial embedding outputs.
718
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
719
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
720
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
721
+ the self-attention heads.
722
+ """
723
+
724
+ logits: torch.FloatTensor = None
725
+ cls_logits: torch.FloatTensor = None
726
+ distillation_logits: torch.FloatTensor = None
727
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
728
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
729
+
730
+
731
+ @add_start_docstrings(
732
+ """
733
+ EfficientFormer Model transformer with image classification heads on top (a linear layer on top of the final hidden
734
+ state of the [CLS] token and a linear layer on top of the final hidden state of the distillation token) e.g. for
735
+ ImageNet.
736
+
737
+ <Tip warning={true}>
738
+
739
+ This model supports inference only. Fine-tuning with distillation (i.e. with a teacher) is not yet
740
+ supported.
741
+
742
+ </Tip>
743
+ """,
744
+ EFFICIENTFORMER_START_DOCSTRING,
745
+ )
746
+ class EfficientFormerForImageClassificationWithTeacher(EfficientFormerPreTrainedModel):
747
+ def __init__(self, config: EfficientFormerConfig):
748
+ super().__init__(config)
749
+
750
+ self.num_labels = config.num_labels
751
+ self.efficientformer = EfficientFormerModel(config)
752
+
753
+ # Classifier head
754
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
755
+ # Distillation head
756
+ self.distillation_classifier = (
757
+ nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
758
+ )
759
+
760
+ # Initialize weights and apply final processing
761
+ self.post_init()
762
+
763
+ @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
764
+ @add_code_sample_docstrings(
765
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
766
+ output_type=EfficientFormerForImageClassificationWithTeacherOutput,
767
+ config_class=_CONFIG_FOR_DOC,
768
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
769
+ )
770
+ def forward(
771
+ self,
772
+ pixel_values: Optional[torch.Tensor] = None,
773
+ output_attentions: Optional[bool] = None,
774
+ output_hidden_states: Optional[bool] = None,
775
+ return_dict: Optional[bool] = None,
776
+ ) -> Union[tuple, EfficientFormerForImageClassificationWithTeacherOutput]:
777
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
778
+ outputs = self.efficientformer(
779
+ pixel_values,
780
+ output_attentions=output_attentions,
781
+ output_hidden_states=output_hidden_states,
782
+ return_dict=return_dict,
783
+ )
784
+
785
+ sequence_output = outputs[0]
786
+
787
+ cls_logits = self.classifier(sequence_output.mean(-2))
788
+ distillation_logits = self.distillation_classifier(sequence_output.mean(-2))
789
+
790
+ # during inference, return the average of both classifier predictions
791
+ logits = (cls_logits + distillation_logits) / 2
792
+
793
+ if not return_dict:
794
+ output = (logits, cls_logits, distillation_logits) + outputs[1:]
795
+ return output
796
+
797
+ return EfficientFormerForImageClassificationWithTeacherOutput(
798
+ logits=logits,
799
+ cls_logits=cls_logits,
800
+ distillation_logits=distillation_logits,
801
+ hidden_states=outputs.hidden_states,
802
+ attentions=outputs.attentions,
803
+ )
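For context on the distillation head above (a sketch, not upstream code): at inference time the returned `logits` are simply the mean of the classification and distillation heads, so predictions can be read from the combined logits directly.

import torch

cls_logits = torch.tensor([[1.0, 3.0, 2.0]])
distillation_logits = torch.tensor([[2.0, 2.0, 2.0]])
logits = (cls_logits + distillation_logits) / 2  # same averaging as in the forward pass above
print(logits)                                    # tensor([[1.5000, 2.5000, 2.0000]])
print(logits.argmax(-1).item())                  # 1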
llmeval-env/lib/python3.10/site-packages/transformers/models/efficientformer/modeling_tf_efficientformer.py ADDED
@@ -0,0 +1,1193 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Snapchat Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TensorFlow EfficientFormer model."""
16
+
17
+ import itertools
18
+ from dataclasses import dataclass
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import tensorflow as tf
22
+
23
+ from ...activations_tf import ACT2FN
24
+ from ...modeling_tf_outputs import (
25
+ TFBaseModelOutput,
26
+ TFBaseModelOutputWithPooling,
27
+ TFImageClassifierOutput,
28
+ )
29
+ from ...modeling_tf_utils import (
30
+ TFPreTrainedModel,
31
+ TFSequenceClassificationLoss,
32
+ get_initializer,
33
+ keras,
34
+ keras_serializable,
35
+ unpack_inputs,
36
+ )
37
+ from ...tf_utils import shape_list, stable_softmax
38
+ from ...utils import (
39
+ ModelOutput,
40
+ add_code_sample_docstrings,
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ logging,
44
+ )
45
+ from .configuration_efficientformer import EfficientFormerConfig
46
+
47
+
48
+ logger = logging.get_logger(__name__)
49
+
50
+ # General docstring
51
+ _CONFIG_FOR_DOC = "EfficientFormerConfig"
52
+
53
+ # Base docstring
54
+ _CHECKPOINT_FOR_DOC = "snap-research/efficientformer-l1-300"
55
+ _EXPECTED_OUTPUT_SHAPE = [1, 49, 448]
56
+
57
+ # Image classification docstring
58
+ _IMAGE_CLASS_CHECKPOINT = "snap-research/efficientformer-l1-300"
59
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "LABEL_281"
60
+
61
+
62
+ from ..deprecated._archive_maps import TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
63
+
64
+
65
+ class TFEfficientFormerPatchEmbeddings(keras.layers.Layer):
66
+ """
67
+ This class performs downsampling between two stages. For the input tensor with the shape [batch_size, num_channels,
68
+ height, width] it produces output tensor with the shape [batch_size, num_channels, height/stride, width/stride]
69
+ """
70
+
71
+ def __init__(
72
+ self, config: EfficientFormerConfig, num_channels: int, embed_dim: int, apply_norm: bool = True, **kwargs
73
+ ) -> None:
74
+ super().__init__(**kwargs)
75
+ self.num_channels = num_channels
76
+
77
+ self.padding = keras.layers.ZeroPadding2D(padding=config.downsample_pad)
78
+ self.projection = keras.layers.Conv2D(
79
+ filters=embed_dim,
80
+ kernel_size=config.downsample_patch_size,
81
+ strides=config.downsample_stride,
82
+ padding="valid",
83
+ name="projection",
84
+ )
85
+ # Use same default momentum and epsilon as PyTorch equivalent for BatchNormalization
86
+ self.norm = (
87
+ keras.layers.BatchNormalization(axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name="norm")
88
+ if apply_norm
89
+ else tf.identity
90
+ )
91
+ self.embed_dim = embed_dim
92
+
93
+ def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor:
94
+ tf.debugging.assert_shapes(
95
+ [(pixel_values, (..., None, None, self.num_channels))],
96
+ message="Make sure that the channel dimension of the pixel values match with the one set in the configuration.",
97
+ )
98
+ embeddings = self.projection(self.padding(pixel_values))
99
+ embeddings = self.norm(embeddings, training=training)
100
+ return embeddings
101
+
102
+ def build(self, input_shape=None):
103
+ if self.built:
104
+ return
105
+ self.built = True
106
+ if getattr(self, "projection", None) is not None:
107
+ with tf.name_scope(self.projection.name):
108
+ self.projection.build([None, None, None, self.num_channels])
109
+ if getattr(self, "norm", None) is not None:
110
+ if hasattr(self.norm, "name"):
111
+ with tf.name_scope(self.norm.name):
112
+ self.norm.build([None, None, None, self.embed_dim])
113
+
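A shape sketch for the downsampling layer above (illustrative only; it assumes the config defaults of a 3x3 patch kernel with stride 2 and padding 1 and channel widths of 96 -> 224, and note that the TF port works channels-last, i.e. NHWC):

import tensorflow as tf

# ZeroPadding2D(1) followed by a stride-2 "valid" Conv2D halves the spatial size: 28 -> 14.
x = tf.random.normal((1, 28, 28, 96))  # (batch, height, width, num_channels)
y = tf.keras.layers.Conv2D(filters=224, kernel_size=3, strides=2, padding="valid")(
    tf.keras.layers.ZeroPadding2D(padding=1)(x)
)
print(y.shape)  # (1, 14, 14, 224)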
114
+
115
+ class TFEfficientFormerSelfAttention(keras.layers.Layer):
116
+ def __init__(
117
+ self,
118
+ dim: int,
119
+ key_dim: int,
120
+ num_heads: int,
121
+ attention_ratio: int,
122
+ resolution: int,
123
+ config: EfficientFormerConfig,
124
+ **kwargs,
125
+ ):
126
+ super().__init__(**kwargs)
127
+
128
+ self.num_heads = num_heads
129
+ self.key_dim = key_dim
130
+ self.attention_ratio = attention_ratio
131
+ self.scale = key_dim**-0.5
132
+ self.total_key_dim = key_dim * num_heads
133
+ self.expanded_key_dim = int(attention_ratio * key_dim)
134
+ self.total_expanded_key_dim = int(self.expanded_key_dim * num_heads)
135
+ hidden_size = self.total_expanded_key_dim + self.total_key_dim * 2
136
+
137
+ self.qkv = keras.layers.Dense(
138
+ units=hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="qkv"
139
+ )
140
+ self.projection = keras.layers.Dense(
141
+ units=dim, kernel_initializer=get_initializer(config.initializer_range), name="projection"
142
+ )
143
+ self.resolution = resolution
144
+ self.dim = dim
145
+
146
+ def build(self, input_shape: tf.TensorShape) -> None:
147
+ points = list(itertools.product(range(self.resolution), range(self.resolution)))
148
+ num_points = len(points)
149
+ attention_offsets = {}
150
+
151
+ idxs = []
152
+
153
+ for point_1 in points:
154
+ for point_2 in points:
155
+ offset = (abs(point_1[0] - point_2[0]), abs(point_1[1] - point_2[1]))
156
+ if offset not in attention_offsets:
157
+ attention_offsets[offset] = len(attention_offsets)
158
+ idxs.append(attention_offsets[offset])
159
+
160
+ self.attention_biases = self.add_weight(
161
+ shape=(self.num_heads, len(attention_offsets)),
162
+ initializer=keras.initializers.zeros(),
163
+ trainable=True,
164
+ name="attention_biases",
165
+ )
166
+ self.attention_bias_idxs = self.add_weight(
167
+ shape=(num_points, num_points),
168
+ trainable=False,
169
+ dtype=tf.int32,
170
+ name="attention_bias_idxs",
171
+ )
172
+
173
+ self.attention_bias_idxs.assign(tf.reshape(tf.cast(idxs, dtype=tf.int32), (num_points, num_points)))
174
+
175
+ if self.built:
176
+ return
177
+ self.built = True
178
+ if getattr(self, "qkv", None) is not None:
179
+ with tf.name_scope(self.qkv.name):
180
+ self.qkv.build([None, None, self.dim])
181
+ if getattr(self, "projection", None) is not None:
182
+ with tf.name_scope(self.projection.name):
183
+ self.projection.build([None, None, self.total_expanded_key_dim])
184
+
185
+ def call(
186
+ self, hidden_states: tf.Tensor, output_attentions: bool = False, training: bool = False
187
+ ) -> Tuple[tf.Tensor]:
188
+ batch_size, sequence_length, *_ = shape_list(hidden_states)
189
+ qkv = self.qkv(inputs=hidden_states)
190
+
191
+ query_layer, key_layer, value_layer = tf.split(
192
+ tf.reshape(tensor=qkv, shape=(batch_size, sequence_length, self.num_heads, -1)),
193
+ num_or_size_splits=[self.key_dim, self.key_dim, self.expanded_key_dim],
194
+ axis=3,
195
+ )
196
+
197
+ query_layer = tf.transpose(query_layer, perm=[0, 2, 1, 3])
198
+ key_layer = tf.transpose(key_layer, perm=[0, 2, 1, 3])
199
+ value_layer = tf.transpose(value_layer, perm=[0, 2, 1, 3])
200
+
201
+ attention_probs = tf.matmul(query_layer, tf.transpose(key_layer, perm=[0, 1, 3, 2]))
202
+ scale = tf.cast(self.scale, dtype=attention_probs.dtype)
203
+ attention_probs = tf.multiply(attention_probs, scale)
204
+
205
+ attention_biases = tf.gather(params=self.attention_biases, indices=self.attention_bias_idxs, axis=1)
206
+ attention_probs = attention_probs + attention_biases
207
+ attention_probs = stable_softmax(logits=attention_probs, axis=-1)
208
+
209
+ context_layer = tf.matmul(attention_probs, value_layer)
210
+ context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
211
+
212
+ context_layer = tf.reshape(
213
+ tensor=context_layer, shape=(batch_size, sequence_length, self.total_expanded_key_dim)
214
+ )
215
+ context_layer = self.projection(context_layer)
216
+
217
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
218
+
219
+ return outputs
220
+
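A worked example of the relative attention-bias indexing that `build` constructs above (a sketch with `resolution=2`, chosen only to keep the numbers small): the 4 grid points give 16 ordered pairs but only 4 distinct absolute offsets, so `attention_biases` stores 4 learnable values per head and `attention_bias_idxs` is a 4x4 table of indices into them.

import itertools

resolution = 2
points = list(itertools.product(range(resolution), range(resolution)))
attention_offsets, idxs = {}, []
for point_1 in points:
    for point_2 in points:
        offset = (abs(point_1[0] - point_2[0]), abs(point_1[1] - point_2[1]))
        if offset not in attention_offsets:
            attention_offsets[offset] = len(attention_offsets)
        idxs.append(attention_offsets[offset])
print(len(attention_offsets))  # 4 distinct offsets: (0, 0), (0, 1), (1, 0), (1, 1)
print(idxs)                    # 16 entries, reshaped into the 4x4 attention_bias_idxs table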
221
+
222
+ class TFEfficientFormerConvStem(keras.layers.Layer):
223
+ def __init__(self, config: EfficientFormerConfig, out_channels: int, **kwargs):
224
+ super().__init__(**kwargs)
225
+
226
+ self.padding = keras.layers.ZeroPadding2D(padding=1)
227
+ self.convolution1 = keras.layers.Conv2D(
228
+ filters=out_channels // 2, kernel_size=3, strides=2, padding="valid", name="convolution1"
229
+ )
230
+ # Use same default momentum and epsilon as PyTorch equivalent for BatchNormalization
231
+ self.batchnorm_before = keras.layers.BatchNormalization(
232
+ axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name="batchnorm_before"
233
+ )
234
+
235
+ self.convolution2 = keras.layers.Conv2D(
236
+ filters=out_channels,
237
+ kernel_size=3,
238
+ strides=2,
239
+ padding="valid",
240
+ name="convolution2",
241
+ )
242
+ # Use same default momentum and epsilon as PyTorch equivalent for BatchNormalization
243
+ self.batchnorm_after = keras.layers.BatchNormalization(
244
+ axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name="batchnorm_after"
245
+ )
246
+
247
+ self.activation = keras.layers.Activation(activation=keras.activations.relu, name="activation")
248
+ self.out_channels = out_channels
249
+ self.config = config
250
+
251
+ def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor:
252
+ features = self.batchnorm_before(self.convolution1(self.padding(pixel_values)), training=training)
253
+ features = self.activation(features)
254
+ features = self.batchnorm_after(self.convolution2(self.padding(features)), training=training)
255
+ features = self.activation(features)
256
+ return features
257
+
258
+ def build(self, input_shape=None):
259
+ if self.built:
260
+ return
261
+ self.built = True
262
+ if getattr(self, "convolution1", None) is not None:
263
+ with tf.name_scope(self.convolution1.name):
264
+ self.convolution1.build([None, None, None, self.config.num_channels])
265
+ if getattr(self, "batchnorm_before", None) is not None:
266
+ with tf.name_scope(self.batchnorm_before.name):
267
+ self.batchnorm_before.build([None, None, None, self.out_channels // 2])
268
+ if getattr(self, "convolution2", None) is not None:
269
+ with tf.name_scope(self.convolution2.name):
270
+ self.convolution2.build([None, None, None, self.out_channels // 2])
271
+ if getattr(self, "batchnorm_after", None) is not None:
272
+ with tf.name_scope(self.batchnorm_after.name):
273
+ self.batchnorm_after.build([None, None, None, self.out_channels])
274
+ if getattr(self, "activation", None) is not None:
275
+ with tf.name_scope(self.activation.name):
276
+ self.activation.build(None)
277
+
278
+
279
+ class TFEfficientFormerPooling(keras.layers.Layer):
280
+ def __init__(self, pool_size: int, **kwargs):
281
+ super().__init__(**kwargs)
282
+ self.pool = keras.layers.AveragePooling2D(pool_size=pool_size, strides=1, padding="same")
283
+
284
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
285
+ output = self.pool(hidden_states)
286
+ output = output - hidden_states
287
+ return output
288
+
289
+
290
+ class TFEfficientFormerDenseMlp(keras.layers.Layer):
291
+ def __init__(
292
+ self,
293
+ config: EfficientFormerConfig,
294
+ in_features: int,
295
+ hidden_features: Optional[int] = None,
296
+ out_features: Optional[int] = None,
297
+ **kwargs,
298
+ ):
299
+ super().__init__(**kwargs)
300
+ out_features = out_features or in_features
301
+ hidden_features = hidden_features or in_features
302
+
303
+ self.linear_in = keras.layers.Dense(
304
+ units=hidden_features, kernel_initializer=get_initializer(config.initializer_range), name="linear_in"
305
+ )
306
+ self.activation = ACT2FN[config.hidden_act]
307
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
308
+
309
+ self.linear_out = keras.layers.Dense(
310
+ units=out_features, kernel_initializer=get_initializer(config.initializer_range), name="linear_out"
311
+ )
312
+ self.hidden_features = hidden_features
313
+ self.in_features = in_features
314
+
315
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
316
+ hidden_states = self.linear_in(inputs=hidden_states)
317
+ hidden_states = self.activation(hidden_states)
318
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
319
+ hidden_states = self.linear_out(inputs=hidden_states)
320
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
321
+
322
+ return hidden_states
323
+
324
+ def build(self, input_shape=None):
325
+ if self.built:
326
+ return
327
+ self.built = True
328
+ if getattr(self, "linear_in", None) is not None:
329
+ with tf.name_scope(self.linear_in.name):
330
+ self.linear_in.build([None, None, self.in_features])
331
+ if getattr(self, "linear_out", None) is not None:
332
+ with tf.name_scope(self.linear_out.name):
333
+ self.linear_out.build([None, None, self.hidden_features])
334
+
335
+
336
+ class TFEfficientFormerConvMlp(keras.layers.Layer):
337
+ def __init__(
338
+ self,
339
+ config: EfficientFormerConfig,
340
+ in_features: int,
341
+ hidden_features: Optional[int] = None,
342
+ out_features: Optional[int] = None,
343
+ drop: float = 0.0,
344
+ **kwargs,
345
+ ):
346
+ super().__init__(**kwargs)
347
+ out_features = out_features or in_features
348
+ hidden_features = hidden_features or in_features
349
+
350
+ self.convolution1 = keras.layers.Conv2D(
351
+ filters=hidden_features,
352
+ kernel_size=1,
353
+ name="convolution1",
354
+ padding="valid",
355
+ )
356
+
357
+ self.activation = ACT2FN[config.hidden_act]
358
+
359
+ self.convolution2 = keras.layers.Conv2D(
360
+ filters=out_features,
361
+ kernel_size=1,
362
+ name="convolution2",
363
+ padding="valid",
364
+ )
365
+
366
+ self.dropout = keras.layers.Dropout(rate=drop)
367
+
368
+ # Use same default momentum and epsilon as PyTorch equivalent for BatchNormalization
369
+ self.batchnorm_before = keras.layers.BatchNormalization(
370
+ axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name="batchnorm_before"
371
+ )
372
+ # Use same default momentum and epsilon as PyTorch equivalent for BatchNormalization
373
+ self.batchnorm_after = keras.layers.BatchNormalization(
374
+ axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name="batchnorm_after"
375
+ )
376
+ self.hidden_features = hidden_features
377
+ self.in_features = in_features
378
+ self.out_features = out_features
379
+
380
+ def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
381
+ hidden_state = self.convolution1(hidden_state)
382
+ hidden_state = self.batchnorm_before(hidden_state, training=training)
383
+ hidden_state = self.activation(hidden_state)
384
+ hidden_state = self.dropout(hidden_state, training=training)
385
+ hidden_state = self.convolution2(hidden_state)
386
+ hidden_state = self.batchnorm_after(hidden_state, training=training)
387
+ hidden_state = self.dropout(hidden_state, training=training)
388
+ return hidden_state
389
+
390
+ def build(self, input_shape=None):
391
+ if self.built:
392
+ return
393
+ self.built = True
394
+ if getattr(self, "convolution1", None) is not None:
395
+ with tf.name_scope(self.convolution1.name):
396
+ self.convolution1.build([None, None, None, self.in_features])
397
+ if getattr(self, "convolution2", None) is not None:
398
+ with tf.name_scope(self.convolution2.name):
399
+ self.convolution2.build([None, None, None, self.hidden_features])
400
+ if getattr(self, "batchnorm_before", None) is not None:
401
+ with tf.name_scope(self.batchnorm_before.name):
402
+ self.batchnorm_before.build([None, None, None, self.hidden_features])
403
+ if getattr(self, "batchnorm_after", None) is not None:
404
+ with tf.name_scope(self.batchnorm_after.name):
405
+ self.batchnorm_after.build([None, None, None, self.out_features])
406
+
407
+
408
+ # Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextDropPath with ConvNext->EfficientFormer
409
+ class TFEfficientFormerDropPath(keras.layers.Layer):
410
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
411
+ References:
412
+ (1) github.com:rwightman/pytorch-image-models
413
+ """
414
+
415
+ def __init__(self, drop_path: float, **kwargs):
416
+ super().__init__(**kwargs)
417
+ self.drop_path = drop_path
418
+
419
+ def call(self, x: tf.Tensor, training=None):
420
+ if training:
421
+ keep_prob = 1 - self.drop_path
422
+ shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
423
+ random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
424
+ random_tensor = tf.floor(random_tensor)
425
+ return (x / keep_prob) * random_tensor
426
+ return x
427
+
428
+
429
+ class TFEfficientFormerFlat(keras.layers.Layer):
430
+ def __init__(self, **kwargs):
431
+ super().__init__(**kwargs)
432
+
433
+ def call(self, hidden_states: tf.Tensor) -> Tuple[tf.Tensor]:
434
+ batch_size, _, _, in_channels = shape_list(hidden_states)
435
+ hidden_states = tf.reshape(hidden_states, shape=[batch_size, -1, in_channels])
436
+ return hidden_states
437
+
438
+
439
+ class TFEfficientFormerMeta3D(keras.layers.Layer):
440
+ def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float = 0.0, **kwargs):
441
+ super().__init__(**kwargs)
442
+
443
+ self.token_mixer = TFEfficientFormerSelfAttention(
444
+ dim=config.dim,
445
+ key_dim=config.key_dim,
446
+ num_heads=config.num_attention_heads,
447
+ attention_ratio=config.attention_ratio,
448
+ resolution=config.resolution,
449
+ name="token_mixer",
450
+ config=config,
451
+ )
452
+ self.dim = dim
453
+ self.config = config
454
+
455
+ self.layernorm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm1")
456
+ self.layernorm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm2")
457
+ mlp_hidden_dim = int(dim * config.mlp_expansion_ratio)
458
+ self.mlp = TFEfficientFormerDenseMlp(config, in_features=dim, hidden_features=mlp_hidden_dim, name="mlp")
459
+
460
+ # Using `layers.Activation` instead of `tf.identity` to better control `training` behavior.
461
+ self.drop_path = (
462
+ TFEfficientFormerDropPath(drop_path)
463
+ if drop_path > 0.0
464
+ else keras.layers.Activation("linear", name="drop_path")
465
+ )
466
+ self.config = config
467
+
468
+ def build(self, input_shape=None):
469
+ self.layer_scale_1 = None
470
+ self.layer_scale_2 = None
471
+
472
+ if self.config.use_layer_scale:
473
+ self.layer_scale_1 = self.add_weight(
474
+ shape=(self.dim,),
475
+ initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value),
476
+ trainable=True,
477
+ name="layer_scale_1",
478
+ )
479
+ self.layer_scale_2 = self.add_weight(
480
+ shape=(self.dim,),
481
+ initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value),
482
+ trainable=True,
483
+ name="layer_scale_2",
484
+ )
485
+
486
+ if self.built:
487
+ return
488
+ self.built = True
489
+ if getattr(self, "token_mixer", None) is not None:
490
+ with tf.name_scope(self.token_mixer.name):
491
+ self.token_mixer.build(None)
492
+ if getattr(self, "layernorm1", None) is not None:
493
+ with tf.name_scope(self.layernorm1.name):
494
+ self.layernorm1.build([None, None, self.dim])
495
+ if getattr(self, "layernorm2", None) is not None:
496
+ with tf.name_scope(self.layernorm2.name):
497
+ self.layernorm2.build([None, None, self.dim])
498
+ if getattr(self, "mlp", None) is not None:
499
+ with tf.name_scope(self.mlp.name):
500
+ self.mlp.build(None)
501
+ if getattr(self, "drop_path", None) is not None:
502
+ with tf.name_scope(self.drop_path.name):
503
+ self.drop_path.build(None)
504
+
505
+ def call(
506
+ self, hidden_states: tf.Tensor, output_attentions: bool = False, training: bool = False
507
+ ) -> Tuple[tf.Tensor]:
508
+ self_attention_outputs = self.token_mixer(
509
+ hidden_states=self.layernorm1(hidden_states, training=training),
510
+ output_attentions=output_attentions,
511
+ training=training,
512
+ )
513
+
514
+ attention_output = self_attention_outputs[0]
515
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
516
+
517
+ if self.config.use_layer_scale:
518
+ layer_output = hidden_states + self.drop_path(
519
+ tf.expand_dims(tf.expand_dims(self.layer_scale_1, 0), 0) * attention_output,
520
+ training=training,
521
+ )
522
+ layer_output = layer_output + self.drop_path(
523
+ tf.expand_dims(tf.expand_dims(self.layer_scale_2, 0), 0)
524
+ * self.mlp(hidden_states=self.layernorm2(inputs=layer_output, training=training), training=training),
525
+ training=training,
526
+ )
527
+ else:
528
+ layer_output = hidden_states + self.drop_path(attention_output, training=training)
529
+ layer_output = layer_output + self.drop_path(
530
+ self.mlp(hidden_states=self.layernorm2(inputs=layer_output, training=training), training=training),
531
+ training=training,
532
+ )
533
+
534
+ outputs = (layer_output,) + outputs
535
+
536
+ return outputs
537
+
538
+
539
+ class TFEfficientFormerMeta3DLayers(keras.layers.Layer):
540
+ def __init__(self, config: EfficientFormerConfig, **kwargs):
541
+ super().__init__(**kwargs)
542
+ drop_paths = [
543
+ config.drop_path_rate * (block_idx + sum(config.depths[:-1]))
544
+ for block_idx in range(config.num_meta3d_blocks)
545
+ ]
546
+ self.blocks = [
547
+ TFEfficientFormerMeta3D(config, config.hidden_sizes[-1], drop_path=drop_path, name=f"blocks.{i}")
548
+ for i, drop_path in enumerate(drop_paths)
549
+ ]
550
+
551
+ def call(
552
+ self, hidden_states: tf.Tensor, output_attentions: bool = False, training: bool = False
553
+ ) -> Tuple[tf.Tensor]:
554
+ all_attention_outputs = () if output_attentions else None
555
+
556
+ for i, layer_module in enumerate(self.blocks):
557
+ if isinstance(hidden_states, tuple):
558
+ hidden_states = hidden_states[0]
559
+
560
+ hidden_states = layer_module(
561
+ hidden_states=hidden_states, output_attentions=output_attentions, training=training
562
+ )
563
+ if output_attentions:
564
+ all_attention_outputs = all_attention_outputs + (hidden_states[1],)
565
+
566
+ if output_attentions:
567
+ outputs = (hidden_states[0],) + all_attention_outputs
568
+ return outputs
569
+
570
+ return hidden_states
571
+
572
+ def build(self, input_shape=None):
573
+ if self.built:
574
+ return
575
+ self.built = True
576
+ if getattr(self, "blocks", None) is not None:
577
+ for layer in self.blocks:
578
+ with tf.name_scope(layer.name):
579
+ layer.build(None)
580
+
581
+
582
+ class TFEfficientFormerMeta4D(keras.layers.Layer):
583
+ def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float = 0.0, **kwargs):
584
+ super().__init__(**kwargs)
585
+ pool_size = config.pool_size if config.pool_size is not None else 3
586
+ self.token_mixer = TFEfficientFormerPooling(pool_size=pool_size, name="token_mixer")
587
+ self.dim = dim
588
+ mlp_hidden_dim = int(dim * config.mlp_expansion_ratio)
589
+ self.mlp = TFEfficientFormerConvMlp(
590
+ config=config, in_features=dim, hidden_features=mlp_hidden_dim, drop=config.hidden_dropout_prob, name="mlp"
591
+ )
592
+
593
+ self.drop_path = (
594
+ TFEfficientFormerDropPath(drop_path, name="drop_path")
595
+ if drop_path > 0.0
596
+ else keras.layers.Activation("linear", name="drop_path")
597
+ )
598
+ self.config = config
599
+
600
+ def build(self, input_shape=None):
601
+ self.layer_scale_1 = None
602
+ self.layer_scale_2 = None
603
+
604
+ if self.config.use_layer_scale:
605
+ self.layer_scale_1 = self.add_weight(
606
+ shape=(self.dim),
607
+ initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value),
608
+ trainable=True,
609
+ name="layer_scale_1",
610
+ )
611
+ self.layer_scale_2 = self.add_weight(
612
+ shape=(self.dim),
613
+ initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value),
614
+ trainable=True,
615
+ name="layer_scale_2",
616
+ )
617
+
618
+ if self.built:
619
+ return
620
+ self.built = True
621
+ if getattr(self, "token_mixer", None) is not None:
622
+ with tf.name_scope(self.token_mixer.name):
623
+ self.token_mixer.build(None)
624
+ if getattr(self, "mlp", None) is not None:
625
+ with tf.name_scope(self.mlp.name):
626
+ self.mlp.build(None)
627
+ if getattr(self, "drop_path", None) is not None:
628
+ with tf.name_scope(self.drop_path.name):
629
+ self.drop_path.build(None)
630
+
631
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> Tuple[tf.Tensor]:
632
+ outputs = self.token_mixer(hidden_states)
633
+
634
+ if self.config.use_layer_scale:
635
+ layer_output = hidden_states + self.drop_path(
636
+ tf.expand_dims(tf.expand_dims(self.layer_scale_1, 0), 0) * outputs,
637
+ training=training,
638
+ )
639
+
640
+ layer_output = layer_output + self.drop_path(
641
+ tf.expand_dims(tf.expand_dims(self.layer_scale_2, 0), 0)
642
+ * self.mlp(hidden_state=layer_output, training=training),
643
+ training=training,
644
+ )
645
+
646
+ else:
647
+ layer_output = hidden_states + self.drop_path(outputs, training=training)
648
+ layer_output = layer_output + self.drop_path(
649
+ self.mlp(hidden_state=layer_output, training=training), training=training
650
+ )
651
+
652
+ return layer_output
653
+
654
+
655
+ class TFEfficientFormerMeta4DLayers(keras.layers.Layer):
656
+ def __init__(self, config: EfficientFormerConfig, stage_idx: int, **kwargs):
657
+ super().__init__(**kwargs)
658
+ num_layers = (
659
+ config.depths[stage_idx] if stage_idx != -1 else config.depths[stage_idx] - config.num_meta3d_blocks
660
+ )
661
+ drop_paths = [
662
+ config.drop_path_rate * (block_idx + sum(config.depths[:stage_idx])) for block_idx in range(num_layers)
663
+ ]
664
+
665
+ self.blocks = [
666
+ TFEfficientFormerMeta4D(
667
+ config=config, dim=config.hidden_sizes[stage_idx], drop_path=drop_paths[i], name=f"blocks.{i}"
668
+ )
669
+ for i in range(len(drop_paths))
670
+ ]
671
+
672
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> Tuple[tf.Tensor]:
673
+ for layer_module in self.blocks:
674
+ hidden_states = layer_module(hidden_states=hidden_states, training=training)
675
+ return hidden_states
676
+
677
+ def build(self, input_shape=None):
678
+ if self.built:
679
+ return
680
+ self.built = True
681
+ if getattr(self, "blocks", None) is not None:
682
+ for layer in self.blocks:
683
+ with tf.name_scope(layer.name):
684
+ layer.build(None)
685
+
686
+
687
+ class TFEfficientFormerIntermediateStage(keras.layers.Layer):
688
+ def __init__(self, config: EfficientFormerConfig, index: int, **kwargs):
689
+ super().__init__(**kwargs)
690
+ self.meta4D_layers = TFEfficientFormerMeta4DLayers(config=config, stage_idx=index, name="meta4D_layers")
691
+
692
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> Tuple[tf.Tensor]:
693
+ hidden_states = self.meta4D_layers(hidden_states=hidden_states, training=training)
694
+ return hidden_states
695
+
696
+ def build(self, input_shape=None):
697
+ if self.built:
698
+ return
699
+ self.built = True
700
+ if getattr(self, "meta4D_layers", None) is not None:
701
+ with tf.name_scope(self.meta4D_layers.name):
702
+ self.meta4D_layers.build(None)
703
+
704
+
705
+ class TFEfficientFormerLastStage(keras.layers.Layer):
706
+ def __init__(self, config: EfficientFormerConfig, **kwargs):
707
+ super().__init__(**kwargs)
708
+ self.meta4D_layers = TFEfficientFormerMeta4DLayers(config=config, stage_idx=-1, name="meta4D_layers")
709
+ self.flat = TFEfficientFormerFlat(name="flat")
710
+ self.meta3D_layers = TFEfficientFormerMeta3DLayers(config, name="meta3D_layers")
711
+
712
+ def call(
713
+ self, hidden_states: tf.Tensor, output_attentions: bool = False, training: bool = False
714
+ ) -> Tuple[tf.Tensor]:
715
+ hidden_states = self.meta4D_layers(hidden_states=hidden_states, training=training)
716
+ hidden_states = self.flat(hidden_states=hidden_states)
717
+ hidden_states = self.meta3D_layers(
718
+ hidden_states=hidden_states, output_attentions=output_attentions, training=training
719
+ )
720
+
721
+ return hidden_states
722
+
723
+ def build(self, input_shape=None):
724
+ if self.built:
725
+ return
726
+ self.built = True
727
+ if getattr(self, "meta4D_layers", None) is not None:
728
+ with tf.name_scope(self.meta4D_layers.name):
729
+ self.meta4D_layers.build(None)
730
+ if getattr(self, "flat", None) is not None:
731
+ with tf.name_scope(self.flat.name):
732
+ self.flat.build(None)
733
+ if getattr(self, "meta3D_layers", None) is not None:
734
+ with tf.name_scope(self.meta3D_layers.name):
735
+ self.meta3D_layers.build(None)
736
+
737
+
738
+ class TFEfficientFormerEncoder(keras.layers.Layer):
739
+ def __init__(self, config: EfficientFormerConfig, **kwargs):
740
+ super().__init__(**kwargs)
741
+
742
+ self.config = config
743
+ num_intermediate_stages = len(config.depths) - 1
744
+ downsamples = [
745
+ config.downsamples[i] or config.hidden_sizes[i] != config.hidden_sizes[i + 1]
746
+ for i in range(num_intermediate_stages)
747
+ ]
748
+
749
+ intermediate_stages = []
750
+ layer_count = -1
751
+ for i in range(num_intermediate_stages):
752
+ layer_count += 1
753
+ intermediate_stages.append(
754
+ TFEfficientFormerIntermediateStage(config, i, name=f"intermediate_stages.{layer_count}")
755
+ )
756
+ if downsamples[i]:
757
+ layer_count += 1
758
+ intermediate_stages.append(
759
+ TFEfficientFormerPatchEmbeddings(
760
+ config,
761
+ config.hidden_sizes[i],
762
+ config.hidden_sizes[i + 1],
763
+ name=f"intermediate_stages.{layer_count}",
764
+ )
765
+ )
766
+ self.intermediate_stages = intermediate_stages
767
+ self.last_stage = TFEfficientFormerLastStage(config, name="last_stage")
768
+
769
+ def call(
770
+ self,
771
+ hidden_states: tf.Tensor,
772
+ output_hidden_states: bool,
773
+ output_attentions: bool,
774
+ return_dict: bool,
775
+ training: bool = False,
776
+ ) -> TFBaseModelOutput:
777
+ all_hidden_states = () if output_hidden_states else None
778
+ all_self_attentions = () if output_attentions else None
779
+
780
+ if output_hidden_states:
781
+ all_hidden_states = all_hidden_states + (hidden_states,)
782
+
783
+ for layer_module in self.intermediate_stages:
784
+ hidden_states = layer_module(hidden_states, training=training)
785
+
786
+ if output_hidden_states:
787
+ all_hidden_states = all_hidden_states + (hidden_states,)
788
+
789
+ layer_output = self.last_stage(hidden_states, output_attentions=output_attentions, training=training)
790
+
791
+ if output_attentions:
792
+ all_self_attentions = all_self_attentions + layer_output[1:]
793
+
794
+ if output_hidden_states:
795
+ all_hidden_states = all_hidden_states + (layer_output[0],)
796
+
797
+ if not return_dict:
798
+ return tuple(v for v in [layer_output[0], all_hidden_states, all_self_attentions] if v is not None)
799
+
800
+ return TFBaseModelOutput(
801
+ last_hidden_state=layer_output[0],
802
+ hidden_states=all_hidden_states,
803
+ attentions=all_self_attentions,
804
+ )
805
+
806
+ def build(self, input_shape=None):
807
+ if self.built:
808
+ return
809
+ self.built = True
810
+ if getattr(self, "last_stage", None) is not None:
811
+ with tf.name_scope(self.last_stage.name):
812
+ self.last_stage.build(None)
813
+ for layer in self.intermediate_stages:
814
+ with tf.name_scope(layer.name):
815
+ layer.build(None)
816
+
817
+
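The intermediate-stage loop above only inserts a `TFEfficientFormerPatchEmbeddings` downsampling layer when the config explicitly asks for it or when the hidden size changes between consecutive stages. A minimal sketch of that flag computation, using hypothetical config values rather than the library defaults:

```python
# Hypothetical per-stage settings; substitute the values from your EfficientFormerConfig.
depths = [3, 2, 6, 4]
hidden_sizes = [48, 96, 224, 448]
config_downsamples = [True, True, True, True]

num_intermediate_stages = len(depths) - 1
downsamples = [
    config_downsamples[i] or hidden_sizes[i] != hidden_sizes[i + 1]
    for i in range(num_intermediate_stages)
]
print(downsamples)  # [True, True, True] -> a patch embedding follows every intermediate stage
```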
818
+ @keras_serializable
819
+ class TFEfficientFormerMainLayer(keras.layers.Layer):
820
+ config_class = EfficientFormerConfig
821
+
822
+ def __init__(self, config: EfficientFormerConfig, **kwargs) -> None:
823
+ super().__init__(**kwargs)
824
+ self.config = config
825
+
826
+ self.patch_embed = TFEfficientFormerConvStem(config, config.hidden_sizes[0], name="patch_embed")
827
+ self.encoder = TFEfficientFormerEncoder(config, name="encoder")
828
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
829
+
830
+ @unpack_inputs
831
+ def call(
832
+ self,
833
+ pixel_values: Optional[tf.Tensor] = None,
834
+ output_attentions: Optional[tf.Tensor] = None,
835
+ output_hidden_states: Optional[tf.Tensor] = None,
836
+ return_dict: Optional[bool] = None,
837
+ training: bool = False,
838
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor, ...]]:
839
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
840
+
841
+ output_hidden_states = (
842
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
843
+ )
844
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
845
+
846
+ if pixel_values is None:
847
+ raise ValueError("You have to specify pixel_values")
848
+
849
+ # When running on CPU, keras.layers.Conv2D and keras.layers.AveragePool2D do not
850
+ # support channels first NCHW format. A number of blocks contain both.
851
+ # So change the input format from (batch_size, num_channels, height, width) to
852
+ # (batch_size, height, width, num_channels) here.
853
+ # shape = (batch_size, in_height, in_width, in_channels=num_channels)
854
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
855
+ embedding_output = self.patch_embed(pixel_values, training=training)
856
+
857
+ encoder_outputs = self.encoder(
858
+ hidden_states=embedding_output,
859
+ output_attentions=output_attentions,
860
+ output_hidden_states=output_hidden_states,
861
+ return_dict=return_dict,
862
+ training=training,
863
+ )
864
+
865
+ sequence_output = encoder_outputs[0]
866
+ sequence_output = self.layernorm(sequence_output, training=training)
867
+
868
+ # Change the hidden states from (batch_size, height, width, num_channels) to
869
+ # (batch_size, num_channels, height, width).
870
+ # The hidden states are in (batch_size, height, width, num_channels)
871
+ # shape after all stages except the MB3D blocks.
872
+ if output_hidden_states:
873
+ hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1][:-1]]) + (
874
+ encoder_outputs[1][-1],
875
+ )
876
+
877
+ if not return_dict:
878
+ head_outputs = (sequence_output,)
879
+ return head_outputs + encoder_outputs[1:]
880
+
881
+ return TFBaseModelOutput(
882
+ last_hidden_state=sequence_output,
883
+ hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
884
+ attentions=encoder_outputs.attentions,
885
+ )
886
+
887
+ def build(self, input_shape=None):
888
+ if self.built:
889
+ return
890
+ self.built = True
891
+ if getattr(self, "patch_embed", None) is not None:
892
+ with tf.name_scope(self.patch_embed.name):
893
+ self.patch_embed.build(None)
894
+ if getattr(self, "encoder", None) is not None:
895
+ with tf.name_scope(self.encoder.name):
896
+ self.encoder.build(None)
897
+ if getattr(self, "layernorm", None) is not None:
898
+ with tf.name_scope(self.layernorm.name):
899
+ self.layernorm.build([None, None, self.config.hidden_sizes[-1]])
900
+
901
+
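A toy illustration of the channels-first to channels-last conversion performed before the conv stem (the comment block in `call` explains why it is needed on CPU); the shapes are arbitrary:

```python
import tensorflow as tf

# NCHW input, as documented for pixel_values ...
pixel_values_nchw = tf.random.uniform((1, 3, 224, 224))
# ... transposed to NHWC because Keras Conv2D/AveragePooling2D on CPU expect channels-last.
pixel_values_nhwc = tf.transpose(pixel_values_nchw, perm=(0, 2, 3, 1))
print(pixel_values_nhwc.shape)  # (1, 224, 224, 3)
```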
902
+ class TFEfficientFormerPreTrainedModel(TFPreTrainedModel):
903
+ """
904
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
905
+ models.
906
+ """
907
+
908
+ config_class = EfficientFormerConfig
909
+ base_model_prefix = "efficientformer"
910
+ main_input_name = "pixel_values"
911
+
912
+
913
+ EFFICIENTFORMER_START_DOCSTRING = r"""
914
+ This model is a TensorFlow
915
+ [keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer). Use it as a regular
916
+ TensorFlow Module and refer to the TensorFlow documentation for all matters related to general usage and behavior.
917
+
918
+
919
+ Parameters:
920
+ config ([`EfficientFormerConfig`]): Model configuration class with all the parameters of the model.
921
+ Initializing with a config file does not load the weights associated with the model, only the
922
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
923
+ """
924
+
925
+ EFFICIENTFORMER_INPUTS_DOCSTRING = r"""
926
+ Args:
927
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
928
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
929
+ [`EfficientFormerImageProcessor.__call__`] for details.
930
+ output_attentions (`bool`, *optional*):
931
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
932
+ tensors for more detail.
933
+ output_hidden_states (`bool`, *optional*):
934
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
935
+ more detail.
936
+ return_dict (`bool`, *optional*):
937
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
938
+ """
939
+
940
+
941
+ @add_start_docstrings(
942
+ "The bare EfficientFormer Model transformer outputting raw hidden-states without any specific head on top.",
943
+ EFFICIENTFORMER_START_DOCSTRING,
944
+ )
945
+ class TFEfficientFormerModel(TFEfficientFormerPreTrainedModel):
946
+ def __init__(self, config: EfficientFormerConfig, **kwargs) -> None:
947
+ super().__init__(config, **kwargs)
948
+
949
+ self.efficientformer = TFEfficientFormerMainLayer(config, name="efficientformer")
950
+
951
+ @unpack_inputs
952
+ @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
953
+ @add_code_sample_docstrings(
954
+ checkpoint=_CHECKPOINT_FOR_DOC,
955
+ output_type=TFBaseModelOutputWithPooling,
956
+ config_class=_CONFIG_FOR_DOC,
957
+ modality="vision",
958
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
959
+ )
960
+ def call(
961
+ self,
962
+ pixel_values: Optional[tf.Tensor] = None,
963
+ output_attentions: Optional[bool] = None,
964
+ output_hidden_states: Optional[bool] = None,
965
+ return_dict: Optional[bool] = None,
966
+ training: bool = False,
967
+ ) -> Union[Tuple, TFBaseModelOutput]:
968
+ outputs = self.efficientformer(
969
+ pixel_values=pixel_values,
970
+ output_attentions=output_attentions,
971
+ output_hidden_states=output_hidden_states,
972
+ return_dict=return_dict,
973
+ training=training,
974
+ )
975
+ return outputs
976
+
977
+ def build(self, input_shape=None):
978
+ if self.built:
979
+ return
980
+ self.built = True
981
+ if getattr(self, "efficientformer", None) is not None:
982
+ with tf.name_scope(self.efficientformer.name):
983
+ self.efficientformer.build(None)
984
+
985
+
986
+ @add_start_docstrings(
987
+ """
988
+ EfficientFormer Model transformer with an image classification head on top of pooled last hidden state, e.g. for
989
+ ImageNet.
990
+ """,
991
+ EFFICIENTFORMER_START_DOCSTRING,
992
+ )
993
+ class TFEfficientFormerForImageClassification(TFEfficientFormerPreTrainedModel, TFSequenceClassificationLoss):
994
+ def __init__(self, config: EfficientFormerConfig):
995
+ super().__init__(config)
996
+
997
+ self.num_labels = config.num_labels
998
+ self.efficientformer = TFEfficientFormerMainLayer(config, name="efficientformer")
999
+
1000
+ # Classifier head
1001
+ self.classifier = (
1002
+ keras.layers.Dense(config.num_labels, name="classifier")
1003
+ if config.num_labels > 0
1004
+ else keras.layers.Activation("linear", name="classifier")
1005
+ )
1006
+ self.config = config
1007
+
1008
+ @unpack_inputs
1009
+ @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
1010
+ @add_code_sample_docstrings(
1011
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
1012
+ output_type=TFImageClassifierOutput,
1013
+ config_class=_CONFIG_FOR_DOC,
1014
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
1015
+ )
1016
+ def call(
1017
+ self,
1018
+ pixel_values: Optional[tf.Tensor] = None,
1019
+ labels: Optional[tf.Tensor] = None,
1020
+ output_attentions: Optional[bool] = None,
1021
+ output_hidden_states: Optional[bool] = None,
1022
+ return_dict: Optional[bool] = None,
1023
+ training: bool = False,
1024
+ ) -> Union[tf.Tensor, TFImageClassifierOutput]:
1025
+ r"""
1026
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1027
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
1028
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1029
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1030
+ """
1031
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1032
+
1033
+ outputs = self.efficientformer(
1034
+ pixel_values=pixel_values,
1035
+ output_attentions=output_attentions,
1036
+ output_hidden_states=output_hidden_states,
1037
+ return_dict=return_dict,
1038
+ training=training,
1039
+ )
1040
+
1041
+ sequence_output = outputs[0]
1042
+
1043
+ logits = self.classifier(tf.reduce_mean(sequence_output, axis=-2))
1044
+
1045
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1046
+
1047
+ if not return_dict:
1048
+ output = (logits,) + outputs[1:]
1049
+ return ((loss,) + output) if loss is not None else output
1050
+
1051
+ return TFImageClassifierOutput(
1052
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
1053
+ )
1054
+
1055
+ def build(self, input_shape=None):
1056
+ if self.built:
1057
+ return
1058
+ self.built = True
1059
+ if getattr(self, "efficientformer", None) is not None:
1060
+ with tf.name_scope(self.efficientformer.name):
1061
+ self.efficientformer.build(None)
1062
+ if getattr(self, "classifier", None) is not None:
1063
+ if hasattr(self.classifier, "name"):
1064
+ with tf.name_scope(self.classifier.name):
1065
+ self.classifier.build([None, None, self.config.hidden_sizes[-1]])
1066
+
1067
+
1068
+ @dataclass
1069
+ class TFEfficientFormerForImageClassificationWithTeacherOutput(ModelOutput):
1070
+ """
1071
+ Output type of [`TFEfficientFormerForImageClassificationWithTeacher`].
+
+ Args:
1073
+ logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
1074
+ Prediction scores as the average of the cls_logits and distillation logits.
1075
+ cls_logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
1076
+ Prediction scores of the classification head (i.e. the linear layer on top of the mean-pooled final
+ hidden state).
1078
+ distillation_logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
1079
+ Prediction scores of the distillation head (i.e. the linear layer on top of the mean-pooled final
+ hidden state).
1081
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when
1082
+ `config.output_hidden_states=True`):
1083
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
1084
+ `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus
1085
+ the initial embedding outputs.
1086
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when
1087
+ `config.output_attentions=True`):
1088
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
1089
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
1090
+ the self-attention heads.
1091
+ """
1092
+
1093
+ logits: tf.Tensor = None
1094
+ cls_logits: tf.Tensor = None
1095
+ distillation_logits: tf.Tensor = None
1096
+ hidden_states: Optional[Tuple[tf.Tensor]] = None
1097
+ attentions: Optional[Tuple[tf.Tensor]] = None
1098
+
1099
+
1100
+ @add_start_docstrings(
1101
+ """
1102
+ EfficientFormer Model transformer with image classification heads on top (a linear layer on top of the final hidden
1103
+ state and a linear layer on top of the final hidden state of the distillation token) e.g. for ImageNet.
1104
+
1105
+ .. warning::
1106
+ This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet
1107
+ supported.
1108
+ """,
1109
+ EFFICIENTFORMER_START_DOCSTRING,
1110
+ )
1111
+ class TFEfficientFormerForImageClassificationWithTeacher(TFEfficientFormerPreTrainedModel):
1112
+ def __init__(self, config: EfficientFormerConfig) -> None:
1113
+ super().__init__(config)
1114
+
1115
+ self.num_labels = config.num_labels
1116
+ self.efficientformer = TFEfficientFormerMainLayer(config, name="efficientformer")
1117
+
1118
+ # Classifier heads
1119
+ self.classifier = (
1120
+ keras.layers.Dense(config.num_labels, name="classifier")
1121
+ if config.num_labels > 0
1122
+ else keras.layers.Activation("linear", name="classifier")
1123
+ )
1124
+ self.distillation_classifier = (
1125
+ keras.layers.Dense(config.num_labels, name="distillation_classifier")
1126
+ if config.num_labels > 0
1127
+ else keras.layers.Activation("linear", name="distillation_classifier")
1128
+ )
1129
+
1130
+ @unpack_inputs
1131
+ @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
1132
+ @add_code_sample_docstrings(
1133
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
1134
+ output_type=TFEfficientFormerForImageClassificationWithTeacherOutput,
1135
+ config_class=_CONFIG_FOR_DOC,
1136
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
1137
+ )
1138
+ def call(
1139
+ self,
1140
+ pixel_values: Optional[tf.Tensor] = None,
1141
+ output_attentions: Optional[bool] = None,
1142
+ output_hidden_states: Optional[bool] = None,
1143
+ return_dict: Optional[bool] = None,
1144
+ training: bool = False,
1145
+ ) -> Union[tuple, TFEfficientFormerForImageClassificationWithTeacherOutput]:
1146
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1147
+
1148
+ if training:
1149
+ raise Exception(
1150
+ "This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet supported."
1151
+ )
1152
+
1153
+ outputs = self.efficientformer(
1154
+ pixel_values=pixel_values,
1155
+ output_attentions=output_attentions,
1156
+ output_hidden_states=output_hidden_states,
1157
+ return_dict=return_dict,
1158
+ training=training,
1159
+ )
1160
+
1161
+ sequence_output = outputs[0]
1162
+
1163
+ cls_logits = self.classifier(tf.reduce_mean(sequence_output, axis=-2))
1164
+ distillation_logits = self.distillation_classifier(tf.reduce_mean(sequence_output, axis=-2))
1165
+ logits = (cls_logits + distillation_logits) / 2
1166
+
1167
+ if not return_dict:
1168
+ output = (logits, cls_logits, distillation_logits) + outputs[1:]
1169
+ return output
1170
+
1171
+ return TFEfficientFormerForImageClassificationWithTeacherOutput(
1172
+ logits=logits,
1173
+ cls_logits=cls_logits,
1174
+ distillation_logits=distillation_logits,
1175
+ hidden_states=outputs.hidden_states,
1176
+ attentions=outputs.attentions,
1177
+ )
1178
+
1179
+ def build(self, input_shape=None):
1180
+ if self.built:
1181
+ return
1182
+ self.built = True
1183
+ if getattr(self, "efficientformer", None) is not None:
1184
+ with tf.name_scope(self.efficientformer.name):
1185
+ self.efficientformer.build(None)
1186
+ if getattr(self, "classifier", None) is not None:
1187
+ if hasattr(self.classifier, "name"):
1188
+ with tf.name_scope(self.classifier.name):
1189
+ self.classifier.build([None, None, self.config.hidden_sizes[-1]])
1190
+ if getattr(self, "distillation_classifier", None) is not None:
1191
+ if hasattr(self.distillation_classifier, "name"):
1192
+ with tf.name_scope(self.distillation_classifier.name):
1193
+ self.distillation_classifier.build([None, None, self.config.hidden_sizes[-1]])
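A hedged end-to-end sketch of using the TF classification head added above; the checkpoint name is assumed to match the one referenced by the docstring constants, and the image path is a placeholder:

```python
from PIL import Image
import tensorflow as tf
from transformers import AutoImageProcessor, TFEfficientFormerForImageClassification

image = Image.open("cat.jpg")  # hypothetical local image
checkpoint = "snap-research/efficientformer-l1-300"  # assumed checkpoint name
processor = AutoImageProcessor.from_pretrained(checkpoint)
model = TFEfficientFormerForImageClassification.from_pretrained(checkpoint)

inputs = processor(images=image, return_tensors="tf")
outputs = model(**inputs)
predicted_class = int(tf.math.argmax(outputs.logits, axis=-1)[0])
print(model.config.id2label[predicted_class])
```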
llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/__init__.py ADDED
@@ -0,0 +1,75 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
20
+
21
+ try:
22
+ if not is_vision_available():
23
+ raise OptionalDependencyNotAvailable()
24
+ except OptionalDependencyNotAvailable:
25
+ pass
26
+ else:
27
+ _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
28
+ _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
29
+
30
+ try:
31
+ if not is_torch_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["modeling_glpn"] = [
37
+ "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
38
+ "GLPNForDepthEstimation",
39
+ "GLPNLayer",
40
+ "GLPNModel",
41
+ "GLPNPreTrainedModel",
42
+ ]
43
+
44
+
45
+ if TYPE_CHECKING:
46
+ from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
47
+
48
+ try:
49
+ if not is_vision_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ from .feature_extraction_glpn import GLPNFeatureExtractor
55
+ from .image_processing_glpn import GLPNImageProcessor
56
+
57
+ try:
58
+ if not is_torch_available():
59
+ raise OptionalDependencyNotAvailable()
60
+ except OptionalDependencyNotAvailable:
61
+ pass
62
+ else:
63
+ from .modeling_glpn import (
64
+ GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
65
+ GLPNForDepthEstimation,
66
+ GLPNLayer,
67
+ GLPNModel,
68
+ GLPNPreTrainedModel,
69
+ )
70
+
71
+
72
+ else:
73
+ import sys
74
+
75
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
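A small sketch of how the lazy import structure above is consumed: submodules are only materialized when their symbols are first accessed, and the vision/torch branches require the corresponding optional dependencies to be installed:

```python
from transformers.models.glpn import GLPNConfig              # always available
from transformers.models.glpn import GLPNImageProcessor      # requires the vision extras
from transformers.models.glpn import GLPNForDepthEstimation  # requires torch

config = GLPNConfig()
print(config.model_type)  # "glpn"
```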
llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.23 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/configuration_glpn.cpython-310.pyc ADDED
Binary file (5.35 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/convert_glpn_to_pytorch.cpython-310.pyc ADDED
Binary file (5.39 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/feature_extraction_glpn.cpython-310.pyc ADDED
Binary file (1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/image_processing_glpn.cpython-310.pyc ADDED
Binary file (9.32 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/modeling_glpn.cpython-310.pyc ADDED
Binary file (23.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/configuration_glpn.py ADDED
@@ -0,0 +1,135 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 KAIST and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ GLPN model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class GLPNConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`GLPNModel`]. It is used to instantiate a GLPN
30
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
31
+ defaults will yield a similar configuration to that of the GLPN
32
+ [vinvino02/glpn-kitti](https://huggingface.co/vinvino02/glpn-kitti) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+ Args:
38
+ num_channels (`int`, *optional*, defaults to 3):
39
+ The number of input channels.
40
+ num_encoder_blocks (`int`, *optional*, defaults to 4):
41
+ The number of encoder blocks (i.e. stages in the Mix Transformer encoder).
42
+ depths (`List[int]`, *optional*, defaults to `[2, 2, 2, 2]`):
43
+ The number of layers in each encoder block.
44
+ sr_ratios (`List[int]`, *optional*, defaults to `[8, 4, 2, 1]`):
45
+ Sequence reduction ratios in each encoder block.
46
+ hidden_sizes (`List[int]`, *optional*, defaults to `[32, 64, 160, 256]`):
47
+ Dimension of each of the encoder blocks.
48
+ patch_sizes (`List[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
49
+ Patch size before each encoder block.
50
+ strides (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
51
+ Stride before each encoder block.
52
+ num_attention_heads (`List[int]`, *optional*, defaults to `[1, 2, 5, 8]`):
53
+ Number of attention heads for each attention layer in each block of the Transformer encoder.
54
+ mlp_ratios (`List[int]`, *optional*, defaults to `[4, 4, 4, 4]`):
55
+ Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
56
+ encoder blocks.
57
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
58
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
59
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
60
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
61
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
62
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
63
+ The dropout ratio for the attention probabilities.
64
+ initializer_range (`float`, *optional*, defaults to 0.02):
65
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
66
+ drop_path_rate (`float`, *optional*, defaults to 0.1):
67
+ The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
68
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
69
+ The epsilon used by the layer normalization layers.
70
+ decoder_hidden_size (`int`, *optional*, defaults to 64):
71
+ The dimension of the decoder.
72
+ max_depth (`int`, *optional*, defaults to 10):
73
+ The maximum depth of the decoder.
74
+ head_in_index (`int`, *optional*, defaults to -1):
75
+ The index of the features to use in the head.
76
+
77
+ Example:
78
+
79
+ ```python
80
+ >>> from transformers import GLPNModel, GLPNConfig
81
+
82
+ >>> # Initializing a GLPN vinvino02/glpn-kitti style configuration
83
+ >>> configuration = GLPNConfig()
84
+
85
+ >>> # Initializing a model from the vinvino02/glpn-kitti style configuration
86
+ >>> model = GLPNModel(configuration)
87
+
88
+ >>> # Accessing the model configuration
89
+ >>> configuration = model.config
90
+ ```"""
91
+
92
+ model_type = "glpn"
93
+
94
+ def __init__(
95
+ self,
96
+ num_channels=3,
97
+ num_encoder_blocks=4,
98
+ depths=[2, 2, 2, 2],
99
+ sr_ratios=[8, 4, 2, 1],
100
+ hidden_sizes=[32, 64, 160, 256],
101
+ patch_sizes=[7, 3, 3, 3],
102
+ strides=[4, 2, 2, 2],
103
+ num_attention_heads=[1, 2, 5, 8],
104
+ mlp_ratios=[4, 4, 4, 4],
105
+ hidden_act="gelu",
106
+ hidden_dropout_prob=0.0,
107
+ attention_probs_dropout_prob=0.0,
108
+ initializer_range=0.02,
109
+ drop_path_rate=0.1,
110
+ layer_norm_eps=1e-6,
111
+ decoder_hidden_size=64,
112
+ max_depth=10,
113
+ head_in_index=-1,
114
+ **kwargs,
115
+ ):
116
+ super().__init__(**kwargs)
117
+
118
+ self.num_channels = num_channels
119
+ self.num_encoder_blocks = num_encoder_blocks
120
+ self.depths = depths
121
+ self.sr_ratios = sr_ratios
122
+ self.hidden_sizes = hidden_sizes
123
+ self.patch_sizes = patch_sizes
124
+ self.strides = strides
125
+ self.mlp_ratios = mlp_ratios
126
+ self.num_attention_heads = num_attention_heads
127
+ self.hidden_act = hidden_act
128
+ self.hidden_dropout_prob = hidden_dropout_prob
129
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
130
+ self.initializer_range = initializer_range
131
+ self.drop_path_rate = drop_path_rate
132
+ self.layer_norm_eps = layer_norm_eps
133
+ self.decoder_hidden_size = decoder_hidden_size
134
+ self.max_depth = max_depth
135
+ self.head_in_index = head_in_index
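A short sketch of overriding the defaults above; these particular values mirror the SegFormer-B4-sized encoder used by the conversion script later in this commit, and the model is randomly initialized (no pretrained weights are loaded):

```python
from transformers import GLPNConfig, GLPNForDepthEstimation

config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
model = GLPNForDepthEstimation(config)
print(sum(p.numel() for p in model.parameters()))  # rough parameter count of the larger variant
```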
llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/convert_glpn_to_pytorch.py ADDED
@@ -0,0 +1,219 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert GLPN checkpoints."""
16
+
17
+
18
+ import argparse
19
+ from collections import OrderedDict
20
+ from pathlib import Path
21
+
22
+ import requests
23
+ import torch
24
+ from PIL import Image
25
+
26
+ from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
27
+ from transformers.utils import logging
28
+
29
+
30
+ logging.set_verbosity_info()
31
+ logger = logging.get_logger(__name__)
32
+
33
+
34
+ def rename_keys(state_dict):
35
+ new_state_dict = OrderedDict()
36
+ for key, value in state_dict.items():
37
+ if key.startswith("module.encoder"):
38
+ key = key.replace("module.encoder", "glpn.encoder")
39
+ if key.startswith("module.decoder"):
40
+ key = key.replace("module.decoder", "decoder.stages")
41
+ if "patch_embed" in key:
42
+ # replace for example patch_embed1 by patch_embeddings.0
43
+ idx = key[key.find("patch_embed") + len("patch_embed")]
44
+ key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
45
+ if "norm" in key:
46
+ key = key.replace("norm", "layer_norm")
47
+ if "glpn.encoder.layer_norm" in key:
48
+ # replace for example layer_norm1 by layer_norm.0
49
+ idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
50
+ key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
51
+ if "layer_norm1" in key:
52
+ key = key.replace("layer_norm1", "layer_norm_1")
53
+ if "layer_norm2" in key:
54
+ key = key.replace("layer_norm2", "layer_norm_2")
55
+ if "block" in key:
56
+ # replace for example block1 by block.0
57
+ idx = key[key.find("block") + len("block")]
58
+ key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
59
+ if "attn.q" in key:
60
+ key = key.replace("attn.q", "attention.self.query")
61
+ if "attn.proj" in key:
62
+ key = key.replace("attn.proj", "attention.output.dense")
63
+ if "attn" in key:
64
+ key = key.replace("attn", "attention.self")
65
+ if "fc1" in key:
66
+ key = key.replace("fc1", "dense1")
67
+ if "fc2" in key:
68
+ key = key.replace("fc2", "dense2")
69
+ if "linear_pred" in key:
70
+ key = key.replace("linear_pred", "classifier")
71
+ if "linear_fuse" in key:
72
+ key = key.replace("linear_fuse.conv", "linear_fuse")
73
+ key = key.replace("linear_fuse.bn", "batch_norm")
74
+ if "linear_c" in key:
75
+ # replace for example linear_c4 by linear_c.3
76
+ idx = key[key.find("linear_c") + len("linear_c")]
77
+ key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
78
+ if "bot_conv" in key:
79
+ key = key.replace("bot_conv", "0.convolution")
80
+ if "skip_conv1" in key:
81
+ key = key.replace("skip_conv1", "1.convolution")
82
+ if "skip_conv2" in key:
83
+ key = key.replace("skip_conv2", "2.convolution")
84
+ if "fusion1" in key:
85
+ key = key.replace("fusion1", "1.fusion")
86
+ if "fusion2" in key:
87
+ key = key.replace("fusion2", "2.fusion")
88
+ if "fusion3" in key:
89
+ key = key.replace("fusion3", "3.fusion")
90
+ if "fusion" in key and "conv" in key:
91
+ key = key.replace("conv", "convolutional_layer")
92
+ if key.startswith("module.last_layer_depth"):
93
+ key = key.replace("module.last_layer_depth", "head.head")
94
+ new_state_dict[key] = value
95
+
96
+ return new_state_dict
97
+
98
+
99
+ def read_in_k_v(state_dict, config):
100
+ # for each of the encoder blocks:
101
+ for i in range(config.num_encoder_blocks):
102
+ for j in range(config.depths[i]):
103
+ # read in weights + bias of keys and values (which is a single matrix in the original implementation)
104
+ kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
105
+ kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
106
+ # next, add keys and values (in that order) to the state dict
107
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
108
+ : config.hidden_sizes[i], :
109
+ ]
110
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
111
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
112
+ config.hidden_sizes[i] :, :
113
+ ]
114
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
115
+
116
+
117
+ # We will verify our results on a COCO image
118
+ def prepare_img():
119
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
120
+ image = Image.open(requests.get(url, stream=True).raw)
121
+
122
+ return image
123
+
124
+
125
+ @torch.no_grad()
126
+ def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
127
+ """
128
+ Copy/paste/tweak model's weights to our GLPN structure.
129
+ """
130
+
131
+ # load GLPN configuration (Segformer-B4 size)
132
+ config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
133
+
134
+ # load image processor (only resize + rescale)
135
+ image_processor = GLPNImageProcessor()
136
+
137
+ # prepare image
138
+ image = prepare_img()
139
+ pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
140
+
141
+ logger.info("Converting model...")
142
+
143
+ # load original state dict
144
+ state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
145
+
146
+ # rename keys
147
+ state_dict = rename_keys(state_dict)
148
+
149
+ # key and value matrices need special treatment
150
+ read_in_k_v(state_dict, config)
151
+
152
+ # create HuggingFace model and load state dict
153
+ model = GLPNForDepthEstimation(config)
154
+ model.load_state_dict(state_dict)
155
+ model.eval()
156
+
157
+ # forward pass
158
+ outputs = model(pixel_values)
159
+ predicted_depth = outputs.predicted_depth
160
+
161
+ # verify output
162
+ if model_name is not None:
163
+ if "nyu" in model_name:
164
+ expected_slice = torch.tensor(
165
+ [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
166
+ )
167
+ elif "kitti" in model_name:
168
+ expected_slice = torch.tensor(
169
+ [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
170
+ )
171
+ else:
172
+ raise ValueError(f"Unknown model name: {model_name}")
173
+
174
+ expected_shape = torch.Size([1, 480, 640])
175
+
176
+ assert predicted_depth.shape == expected_shape
177
+ assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
178
+ print("Looks ok!")
179
+
180
+ # finally, push to hub if required
181
+ if push_to_hub:
182
+ logger.info("Pushing model and image processor to the hub...")
183
+ model.push_to_hub(
184
+ repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
185
+ organization="nielsr",
186
+ commit_message="Add model",
187
+ use_temp_dir=True,
188
+ )
189
+ image_processor.push_to_hub(
190
+ repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
191
+ organization="nielsr",
192
+ commit_message="Add image processor",
193
+ use_temp_dir=True,
194
+ )
195
+
196
+
197
+ if __name__ == "__main__":
198
+ parser = argparse.ArgumentParser()
199
+
200
+ parser.add_argument(
201
+ "--checkpoint_path",
202
+ default=None,
203
+ type=str,
204
+ help="Path to the original PyTorch checkpoint (.pth file).",
205
+ )
206
+ parser.add_argument(
207
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
208
+ )
209
+ parser.add_argument(
210
+ "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
211
+ )
212
+ parser.add_argument(
213
+ "--model_name",
214
+ default="glpn-kitti",
215
+ type=str,
216
+ help="Name of the model in case you're pushing to the hub.",
217
+ )
218
+ args = parser.parse_args()
219
+ convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
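For reference, a hedged sketch of driving the converter programmatically instead of through argparse; the checkpoint path and output folder are placeholders, and the script is assumed to be importable from the working directory:

```python
from convert_glpn_to_pytorch import convert_glpn_checkpoint  # assumes the script is on sys.path

convert_glpn_checkpoint(
    checkpoint_path="glpn_kitti.pth",      # hypothetical original .pth checkpoint
    pytorch_dump_folder_path="converted",  # hypothetical output folder
    push_to_hub=False,
    model_name="glpn-kitti",               # must contain "kitti" or "nyu" to enable output verification
)
```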
llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/feature_extraction_glpn.py ADDED
@@ -0,0 +1,33 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for GLPN."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_glpn import GLPNImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class GLPNFeatureExtractor(GLPNImageProcessor):
27
+ def __init__(self, *args, **kwargs) -> None:
28
+ warnings.warn(
29
+ "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
30
+ " use GLPNImageProcessor instead.",
31
+ FutureWarning,
32
+ )
33
+ super().__init__(*args, **kwargs)
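A tiny sketch of the deprecation path above: constructing the old feature extractor emits a `FutureWarning` but otherwise behaves exactly like `GLPNImageProcessor`:

```python
import warnings
from transformers import GLPNFeatureExtractor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = GLPNFeatureExtractor()
print(any(issubclass(w.category, FutureWarning) for w in caught))  # True
```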
llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/image_processing_glpn.py ADDED
@@ -0,0 +1,233 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for GLPN."""
16
+
17
+ from typing import List, Optional, Union
18
+
19
+ import numpy as np
20
+ import PIL.Image
21
+
22
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature
23
+ from ...image_transforms import resize, to_channel_dimension_format
24
+ from ...image_utils import (
25
+ ChannelDimension,
26
+ PILImageResampling,
27
+ get_image_size,
28
+ infer_channel_dimension_format,
29
+ is_scaled_image,
30
+ make_list_of_images,
31
+ to_numpy_array,
32
+ valid_images,
33
+ validate_kwargs,
34
+ validate_preprocess_arguments,
35
+ )
36
+ from ...utils import TensorType, logging
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+
42
+ class GLPNImageProcessor(BaseImageProcessor):
43
+ r"""
44
+ Constructs a GLPN image processor.
45
+
46
+ Args:
47
+ do_resize (`bool`, *optional*, defaults to `True`):
48
+ Whether to resize the image's (height, width) dimensions, rounding them down to the closest multiple of
49
+ `size_divisor`. Can be overridden by `do_resize` in `preprocess`.
50
+ size_divisor (`int`, *optional*, defaults to 32):
51
+ When `do_resize` is `True`, images are resized so their height and width are rounded down to the closest
52
+ multiple of `size_divisor`. Can be overridden by `size_divisor` in `preprocess`.
53
+ resample (`PIL.Image` resampling filter, *optional*, defaults to `Resampling.BILINEAR`):
54
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
55
+ do_rescale (`bool`, *optional*, defaults to `True`):
56
+ Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Can be
57
+ overridden by `do_rescale` in `preprocess`.
58
+ """
59
+
60
+ model_input_names = ["pixel_values"]
61
+
62
+ def __init__(
63
+ self,
64
+ do_resize: bool = True,
65
+ size_divisor: int = 32,
66
+ resample=PILImageResampling.BILINEAR,
67
+ do_rescale: bool = True,
68
+ **kwargs,
69
+ ) -> None:
70
+ self.do_resize = do_resize
71
+ self.do_rescale = do_rescale
72
+ self.size_divisor = size_divisor
73
+ self.resample = resample
74
+ super().__init__(**kwargs)
75
+ self._valid_processor_keys = [
76
+ "images",
77
+ "do_resize",
78
+ "size_divisor",
79
+ "resample",
80
+ "do_rescale",
81
+ "return_tensors",
82
+ "data_format",
83
+ "input_data_format",
84
+ ]
85
+
86
+ def resize(
87
+ self,
88
+ image: np.ndarray,
89
+ size_divisor: int,
90
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
91
+ data_format: Optional[ChannelDimension] = None,
92
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
93
+ **kwargs,
94
+ ) -> np.ndarray:
95
+ """
96
+ Resize the image, rounding the (height, width) dimensions down to the closest multiple of size_divisor.
97
+
98
+ If the image is of dimension (3, 260, 170) and size_divisor is 32, the image will be resized to (3, 256, 160).
99
+
100
+ Args:
101
+ image (`np.ndarray`):
102
+ The image to resize.
103
+ size_divisor (`int`):
104
+ The image is resized so its height and width are rounded down to the closest multiple of
105
+ `size_divisor`.
106
+ resample:
107
+ `PIL.Image` resampling filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
108
+ data_format (`ChannelDimension` or `str`, *optional*):
109
+ The channel dimension format for the output image. If `None`, the channel dimension format of the input
110
+ image is used. Can be one of:
111
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
112
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
113
+ input_data_format (`ChannelDimension` or `str`, *optional*):
114
+ The channel dimension format of the input image. If not set, the channel dimension format is inferred
115
+ from the input image. Can be one of:
116
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
117
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
118
+
119
+ Returns:
120
+ `np.ndarray`: The resized image.
121
+ """
122
+ height, width = get_image_size(image, channel_dim=input_data_format)
123
+ # Rounds the height and width down to the closest multiple of size_divisor
124
+ new_h = height // size_divisor * size_divisor
125
+ new_w = width // size_divisor * size_divisor
126
+ image = resize(
127
+ image,
128
+ (new_h, new_w),
129
+ resample=resample,
130
+ data_format=data_format,
131
+ input_data_format=input_data_format,
132
+ **kwargs,
133
+ )
134
+ return image
135
+
136
+ def preprocess(
137
+ self,
138
+ images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
139
+ do_resize: Optional[bool] = None,
140
+ size_divisor: Optional[int] = None,
141
+ resample=None,
142
+ do_rescale: Optional[bool] = None,
143
+ return_tensors: Optional[Union[TensorType, str]] = None,
144
+ data_format: ChannelDimension = ChannelDimension.FIRST,
145
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
146
+ **kwargs,
147
+ ) -> BatchFeature:
148
+ """
149
+ Preprocess the given images.
150
+
151
+ Args:
152
+ images (`PIL.Image.Image` or `TensorType` or `List[np.ndarray]` or `List[TensorType]`):
153
+ Images to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
154
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
155
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
156
+ Whether to resize the input such that the (height, width) dimensions are a multiple of `size_divisor`.
157
+ size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
158
+ When `do_resize` is `True`, images are resized so their height and width are rounded down to the
159
+ closest multiple of `size_divisor`.
160
+ resample (`PIL.Image` resampling filter, *optional*, defaults to `self.resample`):
161
+ `PIL.Image` resampling filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
162
+ an effect if `do_resize` is set to `True`.
163
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
164
+ Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.).
165
+ return_tensors (`str` or `TensorType`, *optional*):
166
+ The type of tensors to return. Can be one of:
167
+ - `None`: Return a list of `np.ndarray`.
168
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
169
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
170
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
171
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
172
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
173
+ The channel dimension format for the output image. Can be one of:
174
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
175
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
176
+ input_data_format (`ChannelDimension` or `str`, *optional*):
177
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
178
+ from the input image. Can be one of:
179
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
180
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
181
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
182
+ """
183
+ do_resize = do_resize if do_resize is not None else self.do_resize
184
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
185
+ size_divisor = size_divisor if size_divisor is not None else self.size_divisor
186
+ resample = resample if resample is not None else self.resample
187
+
188
+ images = make_list_of_images(images)
189
+
190
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
191
+
192
+ if not valid_images(images):
193
+ raise ValueError(
194
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
195
+ "torch.Tensor, tf.Tensor or jax.ndarray."
196
+ )
197
+
198
+ # Here, the rescale() method uses a constant rescale_factor. It does not need to be validated
199
+ # with a rescale_factor.
200
+ validate_preprocess_arguments(
201
+ do_resize=do_resize,
202
+ size=size_divisor, # Here, size_divisor is used as a parameter for optimal resizing instead of size.
203
+ resample=resample,
204
+ )
205
+
206
+ # All transformations expect numpy arrays.
207
+ images = [to_numpy_array(img) for img in images]
208
+
209
+ if is_scaled_image(images[0]) and do_rescale:
210
+ logger.warning_once(
211
+ "It looks like you are trying to rescale already rescaled images. If the input"
212
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
213
+ )
214
+
215
+ if input_data_format is None:
216
+ # We assume that all images have the same channel dimension format.
217
+ input_data_format = infer_channel_dimension_format(images[0])
218
+
219
+ if do_resize:
220
+ images = [
221
+ self.resize(image, size_divisor=size_divisor, resample=resample, input_data_format=input_data_format)
222
+ for image in images
223
+ ]
224
+
225
+ if do_rescale:
226
+ images = [self.rescale(image, scale=1 / 255, input_data_format=input_data_format) for image in images]
227
+
228
+ images = [
229
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
230
+ ]
231
+
232
+ data = {"pixel_values": images}
233
+ return BatchFeature(data=data, tensor_type=return_tensors)
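A hedged sketch of the default preprocessing defined above: spatial dimensions are rounded down to the nearest multiple of `size_divisor` (32) and pixel values are rescaled to [0, 1]; the input image is random dummy data:

```python
import numpy as np
from transformers import GLPNImageProcessor

processor = GLPNImageProcessor()
image = np.random.randint(0, 256, size=(260, 170, 3), dtype=np.uint8)  # arbitrary HWC uint8 image

batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)          # (1, 3, 256, 160): 260//32*32 = 256, 170//32*32 = 160
print(batch["pixel_values"].max() <= 1.0)   # True, because do_rescale=True by default
```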
llmeval-env/lib/python3.10/site-packages/transformers/models/glpn/modeling_glpn.py ADDED
@@ -0,0 +1,778 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 KAIST and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch GLPN model."""
16
+
17
+
18
+ import math
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import BaseModelOutput, DepthEstimatorOutput
27
+ from ...modeling_utils import PreTrainedModel
28
+ from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
29
+ from ...utils import (
30
+ add_code_sample_docstrings,
31
+ add_start_docstrings,
32
+ add_start_docstrings_to_model_forward,
33
+ logging,
34
+ replace_return_docstrings,
35
+ )
36
+ from .configuration_glpn import GLPNConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+
42
+ # General docstring
43
+ _CONFIG_FOR_DOC = "GLPNConfig"
44
+
45
+ # Base docstring
46
+ _CHECKPOINT_FOR_DOC = "vinvino02/glpn-kitti"
47
+ _EXPECTED_OUTPUT_SHAPE = [1, 512, 15, 20]
48
+
49
+
50
+ from ..deprecated._archive_maps import GLPN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
51
+
52
+
53
+ # Copied from transformers.models.beit.modeling_beit.drop_path
54
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
55
+ """
56
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
57
+
58
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
59
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
60
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
61
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
62
+ argument.
63
+ """
64
+ if drop_prob == 0.0 or not training:
65
+ return input
66
+ keep_prob = 1 - drop_prob
67
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
68
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
69
+ random_tensor.floor_() # binarize
70
+ output = input.div(keep_prob) * random_tensor
71
+ return output
72
+
73
+
74
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerDropPath
75
+ class GLPNDropPath(nn.Module):
76
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
77
+
78
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
79
+ super().__init__()
80
+ self.drop_prob = drop_prob
81
+
82
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
83
+ return drop_path(hidden_states, self.drop_prob, self.training)
84
+
85
+ def extra_repr(self) -> str:
86
+ return "p={}".format(self.drop_prob)
87
+
88
+
89
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerOverlapPatchEmbeddings
90
+ class GLPNOverlapPatchEmbeddings(nn.Module):
91
+ """Construct the overlapping patch embeddings."""
92
+
93
+ def __init__(self, patch_size, stride, num_channels, hidden_size):
94
+ super().__init__()
95
+ self.proj = nn.Conv2d(
96
+ num_channels,
97
+ hidden_size,
98
+ kernel_size=patch_size,
99
+ stride=stride,
100
+ padding=patch_size // 2,
101
+ )
102
+
103
+ self.layer_norm = nn.LayerNorm(hidden_size)
104
+
105
+ def forward(self, pixel_values):
106
+ embeddings = self.proj(pixel_values)
107
+ _, _, height, width = embeddings.shape
108
+ # (batch_size, num_channels, height, width) -> (batch_size, num_channels, height*width) -> (batch_size, height*width, num_channels)
109
+ # this can be fed to a Transformer layer
110
+ embeddings = embeddings.flatten(2).transpose(1, 2)
111
+ embeddings = self.layer_norm(embeddings)
112
+ return embeddings, height, width
113
+
114
+
115
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerEfficientSelfAttention
116
+ class GLPNEfficientSelfAttention(nn.Module):
117
+ """SegFormer's efficient self-attention mechanism. Employs the sequence reduction process introduced in the [PvT
118
+ paper](https://arxiv.org/abs/2102.12122)."""
119
+
120
+ def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
121
+ super().__init__()
122
+ self.hidden_size = hidden_size
123
+ self.num_attention_heads = num_attention_heads
124
+
125
+ if self.hidden_size % self.num_attention_heads != 0:
126
+ raise ValueError(
127
+ f"The hidden size ({self.hidden_size}) is not a multiple of the number of attention "
128
+ f"heads ({self.num_attention_heads})"
129
+ )
130
+
131
+ self.attention_head_size = int(self.hidden_size / self.num_attention_heads)
132
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
133
+
134
+ self.query = nn.Linear(self.hidden_size, self.all_head_size)
135
+ self.key = nn.Linear(self.hidden_size, self.all_head_size)
136
+ self.value = nn.Linear(self.hidden_size, self.all_head_size)
137
+
138
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
139
+
140
+ self.sr_ratio = sequence_reduction_ratio
141
+ if sequence_reduction_ratio > 1:
142
+ self.sr = nn.Conv2d(
143
+ hidden_size, hidden_size, kernel_size=sequence_reduction_ratio, stride=sequence_reduction_ratio
144
+ )
145
+ self.layer_norm = nn.LayerNorm(hidden_size)
146
+
147
+ def transpose_for_scores(self, hidden_states):
148
+ new_shape = hidden_states.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
149
+ hidden_states = hidden_states.view(new_shape)
150
+ return hidden_states.permute(0, 2, 1, 3)
151
+
152
+ def forward(
153
+ self,
154
+ hidden_states,
155
+ height,
156
+ width,
157
+ output_attentions=False,
158
+ ):
159
+ query_layer = self.transpose_for_scores(self.query(hidden_states))
160
+
161
+ if self.sr_ratio > 1:
162
+ batch_size, seq_len, num_channels = hidden_states.shape
163
+ # Reshape to (batch_size, num_channels, height, width)
164
+ hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)
165
+ # Apply sequence reduction
166
+ hidden_states = self.sr(hidden_states)
167
+ # Reshape back to (batch_size, seq_len, num_channels)
168
+ hidden_states = hidden_states.reshape(batch_size, num_channels, -1).permute(0, 2, 1)
169
+ hidden_states = self.layer_norm(hidden_states)
170
+
171
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
172
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
173
+
174
+ # Take the dot product between "query" and "key" to get the raw attention scores.
175
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
176
+
177
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
178
+
179
+ # Normalize the attention scores to probabilities.
180
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
181
+
182
+ # This is actually dropping out entire tokens to attend to, which might
183
+ # seem a bit unusual, but is taken from the original Transformer paper.
184
+ attention_probs = self.dropout(attention_probs)
185
+
186
+ context_layer = torch.matmul(attention_probs, value_layer)
187
+
188
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
189
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
190
+ context_layer = context_layer.view(new_context_layer_shape)
191
+
192
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
193
+
194
+ return outputs
195
+
196
+
197
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerSelfOutput
198
+ class GLPNSelfOutput(nn.Module):
199
+ def __init__(self, config, hidden_size):
200
+ super().__init__()
201
+ self.dense = nn.Linear(hidden_size, hidden_size)
202
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
203
+
204
+ def forward(self, hidden_states, input_tensor):
205
+ hidden_states = self.dense(hidden_states)
206
+ hidden_states = self.dropout(hidden_states)
207
+ return hidden_states
208
+
209
+
210
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerAttention with Segformer->GLPN
211
+ class GLPNAttention(nn.Module):
212
+ def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
213
+ super().__init__()
214
+ self.self = GLPNEfficientSelfAttention(
215
+ config=config,
216
+ hidden_size=hidden_size,
217
+ num_attention_heads=num_attention_heads,
218
+ sequence_reduction_ratio=sequence_reduction_ratio,
219
+ )
220
+ self.output = GLPNSelfOutput(config, hidden_size=hidden_size)
221
+ self.pruned_heads = set()
222
+
223
+ def prune_heads(self, heads):
224
+ if len(heads) == 0:
225
+ return
226
+ heads, index = find_pruneable_heads_and_indices(
227
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
228
+ )
229
+
230
+ # Prune linear layers
231
+ self.self.query = prune_linear_layer(self.self.query, index)
232
+ self.self.key = prune_linear_layer(self.self.key, index)
233
+ self.self.value = prune_linear_layer(self.self.value, index)
234
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
235
+
236
+ # Update hyper params and store pruned heads
237
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
238
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
239
+ self.pruned_heads = self.pruned_heads.union(heads)
240
+
241
+ def forward(self, hidden_states, height, width, output_attentions=False):
242
+ self_outputs = self.self(hidden_states, height, width, output_attentions)
243
+
244
+ attention_output = self.output(self_outputs[0], hidden_states)
245
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
246
+ return outputs
247
+
248
+
249
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerDWConv
250
+ class GLPNDWConv(nn.Module):
251
+ def __init__(self, dim=768):
252
+ super().__init__()
253
+ self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
254
+
255
+ def forward(self, hidden_states, height, width):
256
+ batch_size, seq_len, num_channels = hidden_states.shape
257
+ hidden_states = hidden_states.transpose(1, 2).view(batch_size, num_channels, height, width)
258
+ hidden_states = self.dwconv(hidden_states)
259
+ hidden_states = hidden_states.flatten(2).transpose(1, 2)
260
+
261
+ return hidden_states
262
+
263
+
264
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerMixFFN with Segformer->GLPN
265
+ class GLPNMixFFN(nn.Module):
266
+ def __init__(self, config, in_features, hidden_features=None, out_features=None):
267
+ super().__init__()
268
+ out_features = out_features or in_features
269
+ self.dense1 = nn.Linear(in_features, hidden_features)
270
+ self.dwconv = GLPNDWConv(hidden_features)
271
+ if isinstance(config.hidden_act, str):
272
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
273
+ else:
274
+ self.intermediate_act_fn = config.hidden_act
275
+ self.dense2 = nn.Linear(hidden_features, out_features)
276
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
277
+
278
+ def forward(self, hidden_states, height, width):
279
+ hidden_states = self.dense1(hidden_states)
280
+ hidden_states = self.dwconv(hidden_states, height, width)
281
+ hidden_states = self.intermediate_act_fn(hidden_states)
282
+ hidden_states = self.dropout(hidden_states)
283
+ hidden_states = self.dense2(hidden_states)
284
+ hidden_states = self.dropout(hidden_states)
285
+ return hidden_states
286
+
287
+
288
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerLayer with Segformer->GLPN
289
+ class GLPNLayer(nn.Module):
290
+ """This corresponds to the Block class in the original implementation."""
291
+
292
+ def __init__(self, config, hidden_size, num_attention_heads, drop_path, sequence_reduction_ratio, mlp_ratio):
293
+ super().__init__()
294
+ self.layer_norm_1 = nn.LayerNorm(hidden_size)
295
+ self.attention = GLPNAttention(
296
+ config,
297
+ hidden_size=hidden_size,
298
+ num_attention_heads=num_attention_heads,
299
+ sequence_reduction_ratio=sequence_reduction_ratio,
300
+ )
301
+ self.drop_path = GLPNDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
302
+ self.layer_norm_2 = nn.LayerNorm(hidden_size)
303
+ mlp_hidden_size = int(hidden_size * mlp_ratio)
304
+ self.mlp = GLPNMixFFN(config, in_features=hidden_size, hidden_features=mlp_hidden_size)
305
+
306
+ def forward(self, hidden_states, height, width, output_attentions=False):
307
+ self_attention_outputs = self.attention(
308
+ self.layer_norm_1(hidden_states), # in GLPN, layernorm is applied before self-attention
309
+ height,
310
+ width,
311
+ output_attentions=output_attentions,
312
+ )
313
+
314
+ attention_output = self_attention_outputs[0]
315
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
316
+
317
+ # first residual connection (with stochastic depth)
318
+ attention_output = self.drop_path(attention_output)
319
+ hidden_states = attention_output + hidden_states
320
+
321
+ mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width)
322
+
323
+ # second residual connection (with stochastic depth)
324
+ mlp_output = self.drop_path(mlp_output)
325
+ layer_output = mlp_output + hidden_states
326
+
327
+ outputs = (layer_output,) + outputs
328
+
329
+ return outputs
330
+
331
+
332
+ class GLPNEncoder(nn.Module):
333
+ def __init__(self, config):
334
+ super().__init__()
335
+ self.config = config
336
+
337
+ # stochastic depth decay rule
338
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
339
+
340
+ # patch embeddings
341
+ embeddings = []
342
+ for i in range(config.num_encoder_blocks):
343
+ embeddings.append(
344
+ GLPNOverlapPatchEmbeddings(
345
+ patch_size=config.patch_sizes[i],
346
+ stride=config.strides[i],
347
+ num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
348
+ hidden_size=config.hidden_sizes[i],
349
+ )
350
+ )
351
+ self.patch_embeddings = nn.ModuleList(embeddings)
352
+
353
+ # Transformer blocks
354
+ blocks = []
355
+ cur = 0
356
+ for i in range(config.num_encoder_blocks):
357
+ # each block consists of layers
358
+ layers = []
359
+ if i != 0:
360
+ cur += config.depths[i - 1]
361
+ for j in range(config.depths[i]):
362
+ layers.append(
363
+ GLPNLayer(
364
+ config,
365
+ hidden_size=config.hidden_sizes[i],
366
+ num_attention_heads=config.num_attention_heads[i],
367
+ drop_path=dpr[cur + j],
368
+ sequence_reduction_ratio=config.sr_ratios[i],
369
+ mlp_ratio=config.mlp_ratios[i],
370
+ )
371
+ )
372
+ blocks.append(nn.ModuleList(layers))
373
+
374
+ self.block = nn.ModuleList(blocks)
375
+
376
+ # Layer norms
377
+ self.layer_norm = nn.ModuleList(
378
+ [nn.LayerNorm(config.hidden_sizes[i]) for i in range(config.num_encoder_blocks)]
379
+ )
380
+
381
+ def forward(
382
+ self,
383
+ pixel_values,
384
+ output_attentions=False,
385
+ output_hidden_states=False,
386
+ return_dict=True,
387
+ ):
388
+ all_hidden_states = () if output_hidden_states else None
389
+ all_self_attentions = () if output_attentions else None
390
+
391
+ batch_size = pixel_values.shape[0]
392
+
393
+ hidden_states = pixel_values
394
+ for idx, x in enumerate(zip(self.patch_embeddings, self.block, self.layer_norm)):
395
+ embedding_layer, block_layer, norm_layer = x
396
+ # first, obtain patch embeddings
397
+ hidden_states, height, width = embedding_layer(hidden_states)
398
+ # second, send embeddings through blocks
399
+ for i, blk in enumerate(block_layer):
400
+ layer_outputs = blk(hidden_states, height, width, output_attentions)
401
+ hidden_states = layer_outputs[0]
402
+ if output_attentions:
403
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
404
+ # third, apply layer norm
405
+ hidden_states = norm_layer(hidden_states)
406
+ # fourth, optionally reshape back to (batch_size, num_channels, height, width)
407
+ hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous()
408
+ if output_hidden_states:
409
+ all_hidden_states = all_hidden_states + (hidden_states,)
410
+
411
+ if not return_dict:
412
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
413
+ return BaseModelOutput(
414
+ last_hidden_state=hidden_states,
415
+ hidden_states=all_hidden_states,
416
+ attentions=all_self_attentions,
417
+ )
418
+
419
+
420
+ class GLPNPreTrainedModel(PreTrainedModel):
421
+ """
422
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
423
+ models.
424
+ """
425
+
426
+ config_class = GLPNConfig
427
+ base_model_prefix = "glpn"
428
+ main_input_name = "pixel_values"
429
+
430
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerPreTrainedModel._init_weights
431
+ def _init_weights(self, module):
432
+ """Initialize the weights"""
433
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
434
+ # Slightly different from the TF version which uses truncated_normal for initialization
435
+ # cf https://github.com/pytorch/pytorch/pull/5617
436
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
437
+ if module.bias is not None:
438
+ module.bias.data.zero_()
439
+ elif isinstance(module, nn.Embedding):
440
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
441
+ if module.padding_idx is not None:
442
+ module.weight.data[module.padding_idx].zero_()
443
+ elif isinstance(module, nn.LayerNorm):
444
+ module.bias.data.zero_()
445
+ module.weight.data.fill_(1.0)
446
+
447
+
448
+ GLPN_START_DOCSTRING = r"""
449
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
450
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
451
+ behavior.
452
+
453
+ Parameters:
454
+ config ([`GLPNConfig`]): Model configuration class with all the parameters of the model.
455
+ Initializing with a config file does not load the weights associated with the model, only the
456
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
457
+ """
458
+
459
+ GLPN_INPUTS_DOCSTRING = r"""
460
+
461
+ Args:
462
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
463
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
464
+ [`AutoImageProcessor`]. See [`GLPNImageProcessor.__call__`] for details.
465
+
466
+ output_attentions (`bool`, *optional*):
467
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
468
+ tensors for more detail.
469
+ output_hidden_states (`bool`, *optional*):
470
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
471
+ more detail.
472
+ return_dict (`bool`, *optional*):
473
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
474
+ """
475
+
476
+
477
+ @add_start_docstrings(
478
+ "The bare GLPN encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top.",
479
+ GLPN_START_DOCSTRING,
480
+ )
481
+ class GLPNModel(GLPNPreTrainedModel):
482
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerModel.__init__ with Segformer->GLPN
483
+ def __init__(self, config):
484
+ super().__init__(config)
485
+ self.config = config
486
+
487
+ # hierarchical Transformer encoder
488
+ self.encoder = GLPNEncoder(config)
489
+
490
+ # Initialize weights and apply final processing
491
+ self.post_init()
492
+
493
+ def _prune_heads(self, heads_to_prune):
494
+ """
495
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
496
+ class PreTrainedModel
497
+ """
498
+ for layer, heads in heads_to_prune.items():
499
+ self.encoder.layer[layer].attention.prune_heads(heads)
500
+
501
+ @add_start_docstrings_to_model_forward(GLPN_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
502
+ @add_code_sample_docstrings(
503
+ checkpoint=_CHECKPOINT_FOR_DOC,
504
+ output_type=BaseModelOutput,
505
+ config_class=_CONFIG_FOR_DOC,
506
+ modality="vision",
507
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
508
+ )
509
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerModel.forward
510
+ def forward(
511
+ self,
512
+ pixel_values: torch.FloatTensor,
513
+ output_attentions: Optional[bool] = None,
514
+ output_hidden_states: Optional[bool] = None,
515
+ return_dict: Optional[bool] = None,
516
+ ) -> Union[Tuple, BaseModelOutput]:
517
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
518
+ output_hidden_states = (
519
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
520
+ )
521
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
522
+
523
+ encoder_outputs = self.encoder(
524
+ pixel_values,
525
+ output_attentions=output_attentions,
526
+ output_hidden_states=output_hidden_states,
527
+ return_dict=return_dict,
528
+ )
529
+ sequence_output = encoder_outputs[0]
530
+
531
+ if not return_dict:
532
+ return (sequence_output,) + encoder_outputs[1:]
533
+
534
+ return BaseModelOutput(
535
+ last_hidden_state=sequence_output,
536
+ hidden_states=encoder_outputs.hidden_states,
537
+ attentions=encoder_outputs.attentions,
538
+ )
539
+
540
+
541
+ class GLPNSelectiveFeatureFusion(nn.Module):
542
+ """
543
+ Selective Feature Fusion module, as explained in the [paper](https://arxiv.org/abs/2201.07436) (section 3.4). This
544
+ module adaptively selects and integrates local and global features by obtaining an attention map for each feature.
545
+ """
546
+
547
+ def __init__(self, in_channel=64):
548
+ super().__init__()
549
+
550
+ self.convolutional_layer1 = nn.Sequential(
551
+ nn.Conv2d(in_channels=int(in_channel * 2), out_channels=in_channel, kernel_size=3, stride=1, padding=1),
552
+ nn.BatchNorm2d(in_channel),
553
+ nn.ReLU(),
554
+ )
555
+
556
+ self.convolutional_layer2 = nn.Sequential(
557
+ nn.Conv2d(in_channels=in_channel, out_channels=int(in_channel / 2), kernel_size=3, stride=1, padding=1),
558
+ nn.BatchNorm2d(int(in_channel / 2)),
559
+ nn.ReLU(),
560
+ )
561
+
562
+ self.convolutional_layer3 = nn.Conv2d(
563
+ in_channels=int(in_channel / 2), out_channels=2, kernel_size=3, stride=1, padding=1
564
+ )
565
+
566
+ self.sigmoid = nn.Sigmoid()
567
+
568
+ def forward(self, local_features, global_features):
569
+ # concatenate features along the channel dimension
570
+ features = torch.cat((local_features, global_features), dim=1)
571
+ # pass through convolutional layers
572
+ features = self.convolutional_layer1(features)
573
+ features = self.convolutional_layer2(features)
574
+ features = self.convolutional_layer3(features)
575
+ # apply sigmoid to get two-channel attention map
576
+ attn = self.sigmoid(features)
577
+ # construct hybrid features by adding element-wise
578
+ hybrid_features = local_features * attn[:, 0, :, :].unsqueeze(1) + global_features * attn[
579
+ :, 1, :, :
580
+ ].unsqueeze(1)
581
+
582
+ return hybrid_features
583
+
584
+
585
+ class GLPNDecoderStage(nn.Module):
586
+ def __init__(self, in_channels, out_channels):
587
+ super().__init__()
588
+ should_skip = in_channels == out_channels
589
+ self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1) if not should_skip else nn.Identity()
590
+ self.fusion = GLPNSelectiveFeatureFusion(out_channels)
591
+ self.upsample = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)
592
+
593
+ def forward(self, hidden_state, residual=None):
594
+ hidden_state = self.convolution(hidden_state)
595
+ if residual is not None:
596
+ hidden_state = self.fusion(hidden_state, residual)
597
+ hidden_state = self.upsample(hidden_state)
598
+
599
+ return hidden_state
600
603
+
604
+
605
+ class GLPNDecoder(nn.Module):
606
+ def __init__(self, config):
607
+ super().__init__()
608
+ # we use features from end -> start
609
+ reserved_hidden_sizes = config.hidden_sizes[::-1]
610
+ out_channels = config.decoder_hidden_size
611
+
612
+ self.stages = nn.ModuleList(
613
+ [GLPNDecoderStage(hidden_size, out_channels) for hidden_size in reserved_hidden_sizes]
614
+ )
615
+ # don't fuse in first stage
616
+ self.stages[0].fusion = None
617
+
618
+ self.final_upsample = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)
619
+
620
+ def forward(self, hidden_states: List[torch.Tensor]) -> List[torch.Tensor]:
621
+ stage_hidden_states = []
622
+ stage_hidden_state = None
623
+ for hidden_state, stage in zip(hidden_states[::-1], self.stages):
624
+ stage_hidden_state = stage(hidden_state, stage_hidden_state)
625
+ stage_hidden_states.append(stage_hidden_state)
626
+
627
+ stage_hidden_states[-1] = self.final_upsample(stage_hidden_state)
628
+
629
+ return stage_hidden_states
630
+
631
+
632
+ class SiLogLoss(nn.Module):
633
+ r"""
634
+ Implements the Scale-invariant log scale loss [Eigen et al., 2014](https://arxiv.org/abs/1406.2283).
635
+
636
+ $$L=\sqrt{\frac{1}{n} \sum_{i} d_{i}^{2}-\frac{\lambda}{n^{2}}\left(\sum_{i} d_{i}\right)^{2}}$$ where $d_{i}=\log y_{i}-\log
638
+ y_{i}^{*}$ and $\lambda$ is the weighting factor `lambd` (0.5 by default).
638
+
639
+ """
640
+
641
+ def __init__(self, lambd=0.5):
642
+ super().__init__()
643
+ self.lambd = lambd
644
+
645
+ def forward(self, pred, target):
646
+ valid_mask = (target > 0).detach()
647
+ diff_log = torch.log(target[valid_mask]) - torch.log(pred[valid_mask])
648
+ loss = torch.sqrt(torch.pow(diff_log, 2).mean() - self.lambd * torch.pow(diff_log.mean(), 2))
649
+
650
+ return loss
651
+
652
+
653
+ class GLPNDepthEstimationHead(nn.Module):
654
+ def __init__(self, config):
655
+ super().__init__()
656
+
657
+ self.config = config
658
+
659
+ channels = config.decoder_hidden_size
660
+ self.head = nn.Sequential(
661
+ nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1),
662
+ nn.ReLU(inplace=False),
663
+ nn.Conv2d(channels, 1, kernel_size=3, stride=1, padding=1),
664
+ )
665
+
666
+ def forward(self, hidden_states: List[torch.Tensor]) -> torch.Tensor:
667
+ # use last features of the decoder
668
+ hidden_states = hidden_states[self.config.head_in_index]
669
+
670
+ hidden_states = self.head(hidden_states)
671
+
672
+ predicted_depth = torch.sigmoid(hidden_states) * self.config.max_depth
673
+ predicted_depth = predicted_depth.squeeze(dim=1)
674
+
675
+ return predicted_depth
676
+
677
+
678
+ @add_start_docstrings(
679
+ """GLPN Model transformer with a lightweight depth estimation head on top e.g. for KITTI, NYUv2.""",
680
+ GLPN_START_DOCSTRING,
681
+ )
682
+ class GLPNForDepthEstimation(GLPNPreTrainedModel):
683
+ def __init__(self, config):
684
+ super().__init__(config)
685
+
686
+ self.glpn = GLPNModel(config)
687
+ self.decoder = GLPNDecoder(config)
688
+ self.head = GLPNDepthEstimationHead(config)
689
+
690
+ # Initialize weights and apply final processing
691
+ self.post_init()
692
+
693
+ @add_start_docstrings_to_model_forward(GLPN_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
694
+ @replace_return_docstrings(output_type=DepthEstimatorOutput, config_class=_CONFIG_FOR_DOC)
695
+ def forward(
696
+ self,
697
+ pixel_values: torch.FloatTensor,
698
+ labels: Optional[torch.FloatTensor] = None,
699
+ output_attentions: Optional[bool] = None,
700
+ output_hidden_states: Optional[bool] = None,
701
+ return_dict: Optional[bool] = None,
702
+ ) -> Union[Tuple[torch.Tensor], DepthEstimatorOutput]:
703
+ r"""
704
+ labels (`torch.FloatTensor` of shape `(batch_size, height, width)`, *optional*):
705
+ Ground truth depth estimation maps for computing the loss.
706
+
707
+ Returns:
708
+
709
+ Examples:
710
+
711
+ ```python
712
+ >>> from transformers import AutoImageProcessor, GLPNForDepthEstimation
713
+ >>> import torch
714
+ >>> import numpy as np
715
+ >>> from PIL import Image
716
+ >>> import requests
717
+
718
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
719
+ >>> image = Image.open(requests.get(url, stream=True).raw)
720
+
721
+ >>> image_processor = AutoImageProcessor.from_pretrained("vinvino02/glpn-kitti")
722
+ >>> model = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-kitti")
723
+
724
+ >>> # prepare image for the model
725
+ >>> inputs = image_processor(images=image, return_tensors="pt")
726
+
727
+ >>> with torch.no_grad():
728
+ ... outputs = model(**inputs)
729
+ ... predicted_depth = outputs.predicted_depth
730
+
731
+ >>> # interpolate to original size
732
+ >>> prediction = torch.nn.functional.interpolate(
733
+ ... predicted_depth.unsqueeze(1),
734
+ ... size=image.size[::-1],
735
+ ... mode="bicubic",
736
+ ... align_corners=False,
737
+ ... )
738
+
739
+ >>> # visualize the prediction
740
+ >>> output = prediction.squeeze().cpu().numpy()
741
+ >>> formatted = (output * 255 / np.max(output)).astype("uint8")
742
+ >>> depth = Image.fromarray(formatted)
743
+ ```"""
744
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
745
+ output_hidden_states = (
746
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
747
+ )
748
+
749
+ outputs = self.glpn(
750
+ pixel_values,
751
+ output_attentions=output_attentions,
752
+ output_hidden_states=True, # we need the intermediate hidden states
753
+ return_dict=return_dict,
754
+ )
755
+
756
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
757
+
758
+ out = self.decoder(hidden_states)
759
+ predicted_depth = self.head(out)
760
+
761
+ loss = None
762
+ if labels is not None:
763
+ loss_fct = SiLogLoss()
764
+ loss = loss_fct(predicted_depth, labels)
765
+
766
+ if not return_dict:
767
+ if output_hidden_states:
768
+ output = (predicted_depth,) + outputs[1:]
769
+ else:
770
+ output = (predicted_depth,) + outputs[2:]
771
+ return ((loss,) + output) if loss is not None else output
772
+
773
+ return DepthEstimatorOutput(
774
+ loss=loss,
775
+ predicted_depth=predicted_depth,
776
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
777
+ attentions=outputs.attentions,
778
+ )
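A minimal numeric sketch of the `SiLogLoss` defined above, assuming PyTorch and this `transformers` tree are importable; the depth values are arbitrary and only illustrate that the implementation matches the closed form in its docstring:

```python
import torch

from transformers.models.glpn.modeling_glpn import SiLogLoss

# Illustrative depth maps of shape (batch_size=1, height=2, width=2); values are arbitrary.
pred = torch.tensor([[[1.0, 2.0], [3.0, 4.0]]])
target = torch.tensor([[[1.5, 2.0], [2.5, 4.5]]])

loss = SiLogLoss(lambd=0.5)(pred, target)

# Recompute sqrt(mean(d^2) - lambd * mean(d)^2) with d = log(target) - log(pred),
# taken over the valid (target > 0) pixels, which is all of them here.
d = torch.log(target) - torch.log(pred)
manual = torch.sqrt((d**2).mean() - 0.5 * d.mean() ** 2)
assert torch.allclose(loss, manual)
```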
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/__init__.py ADDED
@@ -0,0 +1,85 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
21
+ }
22
+
23
+ try:
24
+ if not is_torch_available():
25
+ raise OptionalDependencyNotAvailable()
26
+ except OptionalDependencyNotAvailable:
27
+ pass
28
+ else:
29
+ _import_structure["modeling_gpt_neo"] = [
30
+ "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
31
+ "GPTNeoForCausalLM",
32
+ "GPTNeoForQuestionAnswering",
33
+ "GPTNeoForSequenceClassification",
34
+ "GPTNeoForTokenClassification",
35
+ "GPTNeoModel",
36
+ "GPTNeoPreTrainedModel",
37
+ "load_tf_weights_in_gpt_neo",
38
+ ]
39
+
40
+ try:
41
+ if not is_flax_available():
42
+ raise OptionalDependencyNotAvailable()
43
+ except OptionalDependencyNotAvailable:
44
+ pass
45
+ else:
46
+ _import_structure["modeling_flax_gpt_neo"] = [
47
+ "FlaxGPTNeoForCausalLM",
48
+ "FlaxGPTNeoModel",
49
+ "FlaxGPTNeoPreTrainedModel",
50
+ ]
51
+
52
+
53
+ if TYPE_CHECKING:
54
+ from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
55
+
56
+ try:
57
+ if not is_torch_available():
58
+ raise OptionalDependencyNotAvailable()
59
+ except OptionalDependencyNotAvailable:
60
+ pass
61
+ else:
62
+ from .modeling_gpt_neo import (
63
+ GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
64
+ GPTNeoForCausalLM,
65
+ GPTNeoForQuestionAnswering,
66
+ GPTNeoForSequenceClassification,
67
+ GPTNeoForTokenClassification,
68
+ GPTNeoModel,
69
+ GPTNeoPreTrainedModel,
70
+ load_tf_weights_in_gpt_neo,
71
+ )
72
+
73
+ try:
74
+ if not is_flax_available():
75
+ raise OptionalDependencyNotAvailable()
76
+ except OptionalDependencyNotAvailable:
77
+ pass
78
+ else:
79
+ from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
80
+
81
+
82
+ else:
83
+ import sys
84
+
85
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
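As a sketch of what the `_LazyModule` registration above buys, resolving the config class only imports `configuration_gpt_neo`, while the PyTorch model classes are imported the first time they are looked up (assuming `torch` is installed for that second step):

```python
import transformers.models.gpt_neo as gpt_neo

# Cheap: _LazyModule resolves this from configuration_gpt_neo only.
config_cls = gpt_neo.GPTNeoConfig

# This lookup is what triggers the deferred import of modeling_gpt_neo
# (and therefore of torch), per the _import_structure defined above.
model_cls = gpt_neo.GPTNeoForCausalLM
```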
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.37 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/__pycache__/configuration_gpt_neo.cpython-310.pyc ADDED
Binary file (10.3 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/__pycache__/convert_gpt_neo_mesh_tf_to_pytorch.cpython-310.pyc ADDED
Binary file (1.71 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/__pycache__/modeling_flax_gpt_neo.cpython-310.pyc ADDED
Binary file (20.6 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/__pycache__/modeling_gpt_neo.cpython-310.pyc ADDED
Binary file (37.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/configuration_gpt_neo.py ADDED
@@ -0,0 +1,272 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ GPT Neo model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Any, Mapping, Optional
19
+
20
+ from ... import PreTrainedTokenizer, TensorType, is_torch_available
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...onnx import OnnxConfigWithPast
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ from ..deprecated._archive_maps import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
30
+
31
+
32
+ class GPTNeoConfig(PretrainedConfig):
33
+ r"""
34
+ This is the configuration class to store the configuration of a [`GPTNeoModel`]. It is used to instantiate a GPT
35
+ Neo model according to the specified arguments, defining the model architecture. Instantiating a configuration with
36
+ the defaults will yield a similar configuration to that of the GPTNeo
37
+ [EleutherAI/gpt-neo-1.3B](https://huggingface.co/EleutherAI/gpt-neo-1.3B) architecture.
38
+
39
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
40
+ documentation from [`PretrainedConfig`] for more information.
41
+
42
+
43
+ Args:
44
+ vocab_size (`int`, *optional*, defaults to 50257):
45
+ Vocabulary size of the GPT Neo model. Defines the number of different tokens that can be represented by the
46
+ `input_ids` passed when calling [`GPTNeoModel`].
48
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
49
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
50
+ just in case (e.g., 512 or 1024 or 2048).
51
+ hidden_size (`int`, *optional*, defaults to 2048):
52
+ Dimensionality of the encoder layers and the pooler layer.
53
+ num_layers (`int`, *optional*, defaults to 24):
54
+ Number of hidden layers in the Transformer encoder.
55
+ attention_types (`List`, *optional*, defaults to `[[['global', 'local'], 12]]`):
56
+ The type of attention for each layer in a `List` of the following format `[[["attention_type"],
57
+ num_layers]]`, e.g. for a 24-layer model `[[["global"], 24]]` or `[[["global", "local"], 12]]`. Choose the
58
+ value of `attention_type` from `["global", "local"]`
59
+ num_heads (`int`, *optional*, defaults to 16):
60
+ Number of attention heads for each attention layer in the Transformer encoder.
61
+ intermediate_size (`int`, *optional*, defaults to 8192):
62
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
63
+ window_size (`int`, *optional*, defaults to 256):
64
+ The size of the sliding window for local attention.
65
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu_new"`):
66
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
67
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
68
+ resid_dropout (`float`, *optional*, defaults to 0.0):
69
+ Residual dropout used in the attention pattern.
70
+ embed_dropout (`float`, *optional*, defaults to 0.0):
71
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
72
+ attention_dropout (`float`, *optional*, defaults to 0.0):
73
+ The dropout ratio for the attention probabilities.
74
+ classifier_dropout (`float`, *optional*, defaults to 0.1):
75
+ Argument used when doing token classification, used in the model [`GPTNeoForTokenClassification`]. The
76
+ dropout ratio for the hidden layer.
77
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
78
+ The epsilon used by the layer normalization layers.
79
+ initializer_range (`float`, *optional*, defaults to 0.02):
80
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
81
+ use_cache (`bool`, *optional*, defaults to `True`):
82
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
83
+ relevant if `config.is_decoder=True`.
84
+ bos_token_id (`int`, *optional*, defaults to 50256):
85
+ The id of the beginning of sentence token in the vocabulary.
86
+ eos_token_id (`int`, *optional*, defaults to 50256):
87
+ The id of the end of sentence token in the vocabulary.
88
+
89
+ Example:
90
+
91
+ ```python
92
+ >>> from transformers import GPTNeoConfig, GPTNeoModel
93
+
94
+ >>> # Initializing a GPTNeo EleutherAI/gpt-neo-1.3B style configuration
95
+ >>> configuration = GPTNeoConfig()
96
+
97
+ >>> # Initializing a model (with random weights) from the EleutherAI/gpt-neo-1.3B style configuration
98
+ >>> model = GPTNeoModel(configuration)
99
+
100
+ >>> # Accessing the model configuration
101
+ >>> configuration = model.config
102
+ ```"""
103
+
104
+ model_type = "gpt_neo"
105
+ keys_to_ignore_at_inference = ["past_key_values"]
106
+ attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
107
+
108
+ def __init__(
109
+ self,
110
+ vocab_size=50257,
111
+ max_position_embeddings=2048,
112
+ hidden_size=2048,
113
+ num_layers=24,
114
+ attention_types=[[["global", "local"], 12]],
115
+ num_heads=16,
116
+ intermediate_size=None,
117
+ window_size=256,
118
+ activation_function="gelu_new",
119
+ resid_dropout=0.0,
120
+ embed_dropout=0.0,
121
+ attention_dropout=0.0,
122
+ classifier_dropout=0.1,
123
+ layer_norm_epsilon=1e-5,
124
+ initializer_range=0.02,
125
+ use_cache=True,
126
+ bos_token_id=50256,
127
+ eos_token_id=50256,
128
+ **kwargs,
129
+ ):
130
+ self.vocab_size = vocab_size
131
+ self.max_position_embeddings = max_position_embeddings
132
+ self.hidden_size = hidden_size
133
+ self.num_layers = num_layers
134
+ self.num_heads = num_heads
135
+ self.intermediate_size = intermediate_size
136
+ self.window_size = window_size
137
+ self.activation_function = activation_function
138
+ self.resid_dropout = resid_dropout
139
+ self.embed_dropout = embed_dropout
140
+ self.attention_dropout = attention_dropout
141
+ self.classifier_dropout = classifier_dropout
142
+ self.layer_norm_epsilon = layer_norm_epsilon
143
+ self.initializer_range = initializer_range
144
+ self.use_cache = use_cache
145
+
146
+ self.bos_token_id = bos_token_id
147
+ self.eos_token_id = eos_token_id
148
+
149
+ self.attention_types = attention_types
150
+ self.attention_layers = self.expand_attention_types_params(attention_types)
151
+
152
+ if len(self.attention_layers) != self.num_layers:
153
+ raise ValueError(
154
+ "Configuration for convolutional module is incorrect. "
155
+ "It is required that `len(config.attention_layers)` == `config.num_layers` "
156
+ f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
157
+ f"`config.num_layers = {self.num_layers}`. "
158
+ "`config.attention_layers` is prepared using `config.attention_types`. "
159
+ "Please verify the value of `config.attention_types` argument."
160
+ )
161
+
162
+ super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
163
+
164
+ @staticmethod
165
+ def expand_attention_types_params(attention_types):
166
+ attentions = []
167
+ for item in attention_types:
168
+ for _ in range(item[1]):
169
+ attentions.extend(item[0])
170
+ return attentions
171
+
172
+
173
+ def custom_unfold(input, dimension, size, step):
174
+ """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
175
+ import torch
176
+
177
+ shape = input.size()
178
+ rank = len(shape)
179
+ sizedim = shape[dimension]
180
+
181
+ low_indices = torch.arange(0, sizedim, step)
182
+ min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
183
+ indices = torch.arange(size) + low_indices[:min_length][:, None]
184
+
185
+ s = [slice(None)] * rank
186
+ s[dimension] = indices
187
+ sliced = input[s]
188
+
189
+ perm = list(range(0, rank + 1))
190
+ perm.append(perm.pop(dimension + 1))
191
+
192
+ return sliced.permute(perm)
193
+
194
+
195
+ def custom_get_block_length_and_num_blocks(seq_length, window_size):
196
+ """
197
+ Custom implementation for GPTNeoAttentionMixin._get_block_length_and_num_blocks to enable the export to ONNX as
198
+ original implementation uses Python variables and control flow.
199
+ """
200
+ import torch
201
+
202
+ candidates = torch.arange(1, window_size)
203
+ remainders = torch.remainder(seq_length, candidates)
204
+ divisor_indices = remainders == 0
205
+ divisors = candidates[divisor_indices]
206
+ largest_divisor = torch.max(divisors)
207
+ return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
208
+
209
+
210
+ class GPTNeoOnnxConfig(OnnxConfigWithPast):
211
+ @property
212
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
213
+ common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
214
+ if self.use_past:
215
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
216
+ common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
217
+ else:
218
+ common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
219
+
220
+ return common_inputs
221
+
222
+ @property
223
+ def num_attention_heads(self) -> int:
224
+ return self._config.num_heads
225
+
226
+ def generate_dummy_inputs(
227
+ self,
228
+ tokenizer: PreTrainedTokenizer,
229
+ batch_size: int = -1,
230
+ seq_length: int = -1,
231
+ is_pair: bool = False,
232
+ framework: Optional[TensorType] = None,
233
+ ) -> Mapping[str, Any]:
234
+ common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
235
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
236
+ )
237
+
238
+ # We need to order the inputs in the way they appear in the forward()
239
+ ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
240
+
241
+ # Need to add the past_keys
242
+ if self.use_past:
243
+ if not is_torch_available():
244
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
245
+ else:
246
+ import torch
247
+
248
+ batch, seqlen = common_inputs["input_ids"].shape
249
+ # Not using the same length for past_key_values
250
+ past_key_values_length = seqlen + 2
251
+ past_shape = (
252
+ batch,
253
+ self.num_attention_heads,
254
+ past_key_values_length,
255
+ self._config.hidden_size // self.num_attention_heads,
256
+ )
257
+ ordered_inputs["past_key_values"] = [
258
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
259
+ ]
260
+
261
+ ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
262
+ if self.use_past:
263
+ mask_dtype = ordered_inputs["attention_mask"].dtype
264
+ ordered_inputs["attention_mask"] = torch.cat(
265
+ [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
266
+ )
267
+
268
+ return ordered_inputs
269
+
270
+ @property
271
+ def default_onnx_opset(self) -> int:
272
+ return 13
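Two small checks, as a sketch of how the helpers in this file behave (the values simply mirror the defaults above; PyTorch is assumed for the `custom_unfold` part):

```python
import torch

from transformers import GPTNeoConfig
from transformers.models.gpt_neo.configuration_gpt_neo import custom_unfold

# The default attention_types unroll to one entry per layer, alternating global/local.
layers = GPTNeoConfig.expand_attention_types_params([[["global", "local"], 12]])
assert len(layers) == 24 and layers[:4] == ["global", "local", "global", "local"]

# custom_unfold mirrors torch.Tensor.unfold, which is why it can stand in for it during ONNX export.
x = torch.arange(10).reshape(1, 10)
assert torch.equal(custom_unfold(x, dimension=1, size=4, step=2), x.unfold(1, 4, 2))
```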
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/convert_gpt_neo_mesh_tf_to_pytorch.py ADDED
@@ -0,0 +1,72 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Eleuther AI and HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert GPT Neo checkpoint."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+
21
+ from transformers import GPTNeoConfig, GPTNeoForCausalLM, load_tf_weights_in_gpt_neo
22
+ from transformers.utils import logging
23
+
24
+
25
+ logging.set_verbosity_info()
26
+
27
+
28
+ def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
29
+ # Initialise PyTorch model
30
+ config_json = json.load(open(config_file, "r"))
31
+ config = GPTNeoConfig(
32
+ hidden_size=config_json["n_embd"],
33
+ num_layers=config_json["n_layer"],
34
+ num_heads=config_json["n_head"],
35
+ attention_types=config_json["attention_types"],
36
+ max_position_embeddings=config_json["n_positions"],
37
+ resid_dropout=config_json["res_dropout"],
38
+ embed_dropout=config_json["embed_dropout"],
39
+ attention_dropout=config_json["attn_dropout"],
40
+ )
41
+ print(f"Building PyTorch model from configuration: {config}")
42
+ model = GPTNeoForCausalLM(config)
43
+
44
+ # Load weights from tf checkpoint
45
+ load_tf_weights_in_gpt_neo(model, config, tf_checkpoint_path)
46
+
47
+ # Save pytorch-model
48
+ print(f"Save PyTorch model to {pytorch_dump_path}")
49
+ model.save_pretrained(pytorch_dump_path)
50
+
51
+
52
+ if __name__ == "__main__":
53
+ parser = argparse.ArgumentParser()
54
+ # Required parameters
55
+ parser.add_argument(
56
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
57
+ )
58
+ parser.add_argument(
59
+ "--config_file",
60
+ default=None,
61
+ type=str,
62
+ required=True,
63
+ help=(
64
+ "The config json file corresponding to the pre-trained mesh-tf model. \n"
65
+ "This specifies the model architecture."
66
+ ),
67
+ )
68
+ parser.add_argument(
69
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
70
+ )
71
+ args = parser.parse_args()
72
+ convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
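A hedged usage sketch of the conversion entry point above; the three paths are placeholders for a real mesh-tf checkpoint, its `config.json`, and an output directory, and both TensorFlow and PyTorch need to be installed:

```python
from transformers.models.gpt_neo.convert_gpt_neo_mesh_tf_to_pytorch import convert_tf_checkpoint_to_pytorch

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/mesh_tf_checkpoint",  # placeholder path
    config_file="/path/to/config.json",  # placeholder path
    pytorch_dump_path="/path/to/pytorch_dump",  # placeholder output directory
)
```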
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/modeling_flax_gpt_neo.py ADDED
@@ -0,0 +1,684 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Eleuther AI and The Google Flax Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from functools import partial
17
+ from typing import Optional, Tuple
18
+
19
+ import flax.linen as nn
20
+ import jax
21
+ import jax.numpy as jnp
22
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
23
+ from flax.linen import combine_masks, make_causal_mask
24
+ from flax.linen.attention import dot_product_attention_weights
25
+ from flax.traverse_util import flatten_dict, unflatten_dict
26
+ from jax import lax
27
+
28
+ from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput
29
+ from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
30
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
31
+ from .configuration_gpt_neo import GPTNeoConfig
32
+
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+ _CONFIG_FOR_DOC = "GPTNeoConfig"
37
+ _CHECKPOINT_FOR_DOC = "EleutherAI/gpt-neo-1.3B"
38
+
39
+
40
+ GPT_NEO_START_DOCSTRING = r"""
41
+
42
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
43
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
44
+ etc.)
45
+
46
+ This model is also a Flax Linen
47
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
48
+ regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
49
+
50
+ Finally, this model supports inherent JAX features such as:
51
+
52
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
53
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
54
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
55
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
56
+
57
+ Parameters:
58
+ config ([`GPTNeoConfig`]): Model configuration class with all the parameters of the model.
59
+ Initializing with a config file does not load the weights associated with the model, only the
60
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
61
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
62
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
63
+ `jax.numpy.bfloat16` (on TPUs).
64
+
65
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
66
+ specified all the computation will be performed with the given `dtype`.
67
+
68
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
69
+ parameters.**
70
+
71
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
72
+ [`~FlaxPreTrainedModel.to_bf16`].
73
+ """
74
+
75
+ GPT_NEO_INPUTS_DOCSTRING = r"""
76
+ Args:
77
+ input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):
78
+ `input_ids_length` = `sequence_length`. Indices of input sequence tokens in the vocabulary.
79
+
80
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
81
+ [`PreTrainedTokenizer.__call__`] for details.
82
+
83
+ [What are input IDs?](../glossary#input-ids)
84
+ attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
85
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
86
+
87
+ - 1 for tokens that are **not masked**,
88
+ - 0 for tokens that are **masked**.
89
+
90
+ [What are attention masks?](../glossary#attention-mask)
91
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
92
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
93
+ config.max_position_embeddings - 1]`.
94
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
95
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
96
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
97
+ output_attentions (`bool`, *optional*):
98
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
99
+ tensors for more detail.
100
+ output_hidden_states (`bool`, *optional*):
101
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
102
+ more detail.
103
+ return_dict (`bool`, *optional*):
104
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
105
+ """
106
+
107
+
108
+ class FlaxGPTNeoSelfAttention(nn.Module):
109
+ config: GPTNeoConfig
110
+ attention_type: str
111
+ dtype: jnp.dtype = jnp.float32
112
+
113
+ def setup(self):
114
+ config = self.config
115
+ self.embed_dim = config.hidden_size
116
+ self.num_heads = config.num_attention_heads
117
+ self.head_dim = self.embed_dim // self.num_heads
118
+ if self.head_dim * self.num_heads != self.embed_dim:
119
+ raise ValueError(
120
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and "
121
+ f"`num_heads`: {self.num_heads})."
122
+ )
123
+
124
+ self.attn_dropout = nn.Dropout(config.attention_dropout)
125
+ self.resid_dropout = nn.Dropout(config.resid_dropout)
126
+
127
+ dense = partial(
128
+ nn.Dense,
129
+ self.embed_dim,
130
+ dtype=self.dtype,
131
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
132
+ )
133
+
134
+ self.q_proj, self.k_proj, self.v_proj = dense(use_bias=False), dense(use_bias=False), dense(use_bias=False)
135
+ self.out_proj = dense()
136
+
137
+ self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool")
138
+ if self.attention_type == "local":
139
+ self.causal_mask = self.causal_mask ^ jnp.tril(self.causal_mask, -config.window_size)
140
+
141
+ def _split_heads(self, hidden_states):
142
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
143
+
144
+ def _merge_heads(self, hidden_states):
145
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
146
+
147
+ @nn.compact
148
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
149
+ """
150
+ This function takes projected key, value states from a single input token and concatenates the states to cached
151
+ states from previous steps. This function is slightly adapted from the official Flax repository:
152
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
153
+ """
154
+ # detect if we're initializing by absence of existing cache data.
155
+ is_initialized = self.has_variable("cache", "cached_key")
156
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
157
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
158
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
159
+
160
+ if is_initialized:
161
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
162
+ # update key, value caches with our new 1d spatial slices
163
+ cur_index = cache_index.value
164
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
165
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
166
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
167
+ cached_key.value = key
168
+ cached_value.value = value
169
+ num_updated_cache_vectors = query.shape[1]
170
+ cache_index.value = cache_index.value + num_updated_cache_vectors
171
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
172
+ pad_mask = jnp.broadcast_to(
173
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
174
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
175
+ )
176
+ attention_mask = combine_masks(pad_mask, attention_mask)
177
+ return key, value, attention_mask
178
+
179
+ def __call__(
180
+ self,
181
+ hidden_states,
182
+ attention_mask=None,
183
+ deterministic: bool = True,
184
+ init_cache: bool = False,
185
+ output_attentions: bool = False,
186
+ ):
187
+ query = self.q_proj(hidden_states) * jnp.sqrt(self.head_dim).astype(self.dtype)
188
+ key = self.k_proj(hidden_states)
189
+ value = self.v_proj(hidden_states)
190
+
191
+ query = self._split_heads(query)
192
+ key = self._split_heads(key)
193
+ value = self._split_heads(value)
194
+
195
+ query_length, key_length = query.shape[1], key.shape[1]
196
+
197
+ if self.has_variable("cache", "cached_key"):
198
+ mask_shift = self.variables["cache"]["cache_index"]
199
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
200
+ causal_mask = lax.dynamic_slice(
201
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
202
+ )
203
+ else:
204
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
205
+
206
+ batch_size = hidden_states.shape[0]
207
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
208
+
209
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
210
+ attention_mask = combine_masks(attention_mask, causal_mask)
211
+
212
+ dropout_rng = None
213
+ if not deterministic and self.config.attention_dropout > 0.0:
214
+ dropout_rng = self.make_rng("dropout")
215
+
216
+ # During fast autoregressive decoding, we feed one position at a time,
217
+ # and cache the keys and values step by step.
218
+ if self.has_variable("cache", "cached_key") or init_cache:
219
+ key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
220
+
221
+ # transform boolean mask into float mask
222
+ attention_bias = lax.select(
223
+ attention_mask > 0,
224
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
225
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
226
+ )
227
+
228
+ # usual dot product attention
229
+ attn_weights = dot_product_attention_weights(
230
+ query,
231
+ key,
232
+ bias=attention_bias,
233
+ dropout_rng=dropout_rng,
234
+ dropout_rate=self.config.attention_dropout,
235
+ deterministic=deterministic,
236
+ dtype=self.dtype,
237
+ precision=None,
238
+ )
239
+
240
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
241
+ attn_output = self._merge_heads(attn_output)
242
+ attn_output = self.out_proj(attn_output)
243
+ attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
244
+
245
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
246
+ return outputs
247
+
248
+
249
+ class FlaxGPTNeoAttention(nn.Module):
250
+ config: GPTNeoConfig
251
+ layer_id: int = 0
252
+ dtype: jnp.dtype = jnp.float32
253
+
254
+ def setup(self):
255
+ attention_type = self.config.attention_layers[self.layer_id]
256
+ self.attention = FlaxGPTNeoSelfAttention(self.config, attention_type, dtype=self.dtype)
257
+
258
+ def __call__(
259
+ self,
260
+ hidden_states,
261
+ attention_mask=None,
262
+ deterministic: bool = True,
263
+ init_cache: bool = False,
264
+ output_attentions: bool = False,
265
+ ):
266
+ return self.attention(
267
+ hidden_states,
268
+ attention_mask=attention_mask,
269
+ deterministic=deterministic,
270
+ init_cache=init_cache,
271
+ output_attentions=output_attentions,
272
+ )
273
+
274
+
275
+ class FlaxGPTNeoMLP(nn.Module):
276
+ config: GPTNeoConfig
277
+ intermediate_size: int
278
+ dtype: jnp.dtype = jnp.float32
279
+
280
+ def setup(self):
281
+ embed_dim = self.config.hidden_size
282
+ kernel_init = jax.nn.initializers.normal(self.config.initializer_range)
283
+ self.c_fc = nn.Dense(self.intermediate_size, dtype=self.dtype, kernel_init=kernel_init)
284
+ self.c_proj = nn.Dense(embed_dim, dtype=self.dtype, kernel_init=kernel_init)
285
+ self.act = ACT2FN[self.config.activation_function]
286
+ self.dropout = nn.Dropout(rate=self.config.resid_dropout)
287
+
288
+ def __call__(self, hidden_states, deterministic: bool = True):
289
+ hidden_states = self.c_fc(hidden_states)
290
+ hidden_states = self.act(hidden_states)
291
+ hidden_states = self.c_proj(hidden_states)
292
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
293
+ return hidden_states
294
+
295
+
296
+ class FlaxGPTNeoBlock(nn.Module):
297
+ config: GPTNeoConfig
298
+ layer_id: int = 0
299
+ dtype: jnp.dtype = jnp.float32
300
+
301
+ def setup(self):
302
+ hidden_size = self.config.hidden_size
303
+ inner_dim = self.config.intermediate_size if self.config.intermediate_size is not None else 4 * hidden_size
304
+
305
+ self.ln_1 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
306
+ self.attn = FlaxGPTNeoAttention(self.config, layer_id=self.layer_id, dtype=self.dtype)
307
+ self.ln_2 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
308
+ self.mlp = FlaxGPTNeoMLP(self.config, inner_dim, dtype=self.dtype)
309
+
310
+ def __call__(
311
+ self,
312
+ hidden_states,
313
+ attention_mask=None,
314
+ deterministic: bool = True,
315
+ init_cache: bool = False,
316
+ output_attentions: bool = False,
317
+ ):
318
+ residual = hidden_states
319
+ hidden_states = self.ln_1(hidden_states)
320
+ outputs = self.attn(
321
+ hidden_states,
322
+ attention_mask=attention_mask,
323
+ deterministic=deterministic,
324
+ init_cache=init_cache,
325
+ output_attentions=output_attentions,
326
+ )
327
+ # residual connection
328
+ attn_output = outputs[0]
329
+ hidden_states = attn_output + residual
330
+
331
+ residual = hidden_states
332
+ hidden_states = self.ln_2(hidden_states)
333
+ feed_forward_hidden_states = self.mlp(hidden_states, deterministic=deterministic)
334
+ # residual connection
335
+ hidden_states = residual + feed_forward_hidden_states
336
+
337
+ return (hidden_states,) + outputs[1:]
338
+
339
+
340
+ class FlaxGPTNeoPreTrainedModel(FlaxPreTrainedModel):
341
+ """
342
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
343
+ models.
344
+ """
345
+
346
+ config_class = GPTNeoConfig
347
+ base_model_prefix = "transformer"
348
+ module_class: nn.Module = None
349
+
350
+ def __init__(
351
+ self,
352
+ config: GPTNeoConfig,
353
+ input_shape: Tuple = (1, 1),
354
+ seed: int = 0,
355
+ dtype: jnp.dtype = jnp.float32,
356
+ _do_init: bool = True,
357
+ **kwargs,
358
+ ):
359
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
360
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
361
+
362
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
363
+ # init input tensors
364
+ input_ids = jnp.zeros(input_shape, dtype="i4")
365
+ attention_mask = jnp.ones_like(input_ids)
366
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
367
+ params_rng, dropout_rng = jax.random.split(rng)
368
+ rngs = {"params": params_rng, "dropout": dropout_rng}
369
+
370
+ random_params = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)["params"]
371
+
372
+ if params is not None:
373
+ random_params = flatten_dict(unfreeze(random_params))
374
+ params = flatten_dict(unfreeze(params))
375
+ for missing_key in self._missing_keys:
376
+ params[missing_key] = random_params[missing_key]
377
+ self._missing_keys = set()
378
+ return freeze(unflatten_dict(params))
379
+ else:
380
+ return random_params
381
+
382
+ def init_cache(self, batch_size, max_length):
383
+ r"""
384
+ Args:
385
+ batch_size (`int`):
386
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
387
+ max_length (`int`):
388
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
389
+ cache.
390
+ """
391
+ # init input variables to retrieve cache
392
+ input_ids = jnp.ones((batch_size, max_length))
393
+ attention_mask = jnp.ones_like(input_ids)
394
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
395
+
396
+ init_variables = self.module.init(
397
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
398
+ )
399
+ return unfreeze(init_variables["cache"])
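+
+ # Illustrative sketch (not part of the original file): a single cached decoding step driven by hand.
+ # The checkpoint name is an assumption; `generate()` normally handles this via
+ # `prepare_inputs_for_generation` defined further below.
+ #
+ # import jax.numpy as jnp
+ # model = FlaxGPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B", from_pt=True)
+ # past_key_values = model.init_cache(batch_size=1, max_length=16)
+ # input_ids = jnp.ones((1, 1), dtype="i4")
+ # outputs = model(input_ids, past_key_values=past_key_values, position_ids=jnp.zeros((1, 1), dtype="i4"))
+ # past_key_values = outputs.past_key_values  # updated cache, fed back in at the next step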
400
+
401
+ @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING)
402
+ def __call__(
403
+ self,
404
+ input_ids,
405
+ attention_mask=None,
406
+ position_ids=None,
407
+ params: dict = None,
408
+ past_key_values: dict = None,
409
+ dropout_rng: jax.random.PRNGKey = None,
410
+ train: bool = False,
411
+ output_attentions: Optional[bool] = None,
412
+ output_hidden_states: Optional[bool] = None,
413
+ return_dict: Optional[bool] = None,
414
+ ):
415
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
416
+ output_hidden_states = (
417
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
418
+ )
419
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
420
+
421
+ batch_size, sequence_length = input_ids.shape
422
+
423
+ if position_ids is None:
424
+ if past_key_values is not None:
425
+ raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.")
426
+
427
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
428
+
429
+ if attention_mask is None:
430
+ attention_mask = jnp.ones((batch_size, sequence_length))
431
+
432
+ # Handle any PRNG if needed
433
+ rngs = {}
434
+ if dropout_rng is not None:
435
+ rngs["dropout"] = dropout_rng
436
+
437
+ inputs = {"params": params or self.params}
438
+
439
+ # If past_key_values are passed, the cache is already initialized, and a private flag init_cache has to be passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can be changed by the FlaxGPTNeoAttention module.
440
+ if past_key_values:
441
+ inputs["cache"] = past_key_values
442
+ mutable = ["cache"]
443
+ else:
444
+ mutable = False
445
+
446
+ outputs = self.module.apply(
447
+ inputs,
448
+ jnp.array(input_ids, dtype="i4"),
449
+ jnp.array(attention_mask, dtype="i4"),
450
+ jnp.array(position_ids, dtype="i4"),
451
+ not train,
452
+ False,
453
+ output_attentions,
454
+ output_hidden_states,
455
+ return_dict,
456
+ rngs=rngs,
457
+ mutable=mutable,
458
+ )
459
+
460
+ # add updated cache to model output
461
+ if past_key_values is not None and return_dict:
462
+ outputs, past_key_values = outputs
463
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
464
+ return outputs
465
+ elif past_key_values is not None and not return_dict:
466
+ outputs, past_key_values = outputs
467
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
468
+
469
+ return outputs
470
+
471
+
472
+ class FlaxGPTNeoBlockCollection(nn.Module):
473
+ config: GPTNeoConfig
474
+ dtype: jnp.dtype = jnp.float32
475
+
476
+ def setup(self):
477
+ self.blocks = [
478
+ FlaxGPTNeoBlock(self.config, layer_id=i, name=str(i), dtype=self.dtype)
479
+ for i in range(self.config.num_hidden_layers)
480
+ ]
481
+
482
+ def __call__(
483
+ self,
484
+ hidden_states,
485
+ attention_mask=None,
486
+ deterministic: bool = True,
487
+ init_cache: bool = False,
488
+ output_attentions: bool = False,
489
+ output_hidden_states: bool = False,
490
+ return_dict: bool = True,
491
+ ):
492
+ all_attentions = () if output_attentions else None
493
+ all_hidden_states = () if output_hidden_states else None
494
+
495
+ for block in self.blocks:
496
+ if output_hidden_states:
497
+ all_hidden_states += (hidden_states,)
498
+
499
+ layer_outputs = block(
500
+ hidden_states,
501
+ attention_mask,
502
+ deterministic=deterministic,
503
+ init_cache=init_cache,
504
+ output_attentions=output_attentions,
505
+ )
506
+ hidden_states = layer_outputs[0]
507
+
508
+ if output_attentions:
509
+ all_attentions += (layer_outputs[1],)
510
+
511
+ # this contains possible `None` values - `FlaxGPTNeoModule` will filter them out
512
+ outputs = (hidden_states, all_hidden_states, all_attentions)
513
+
514
+ return outputs
515
+
516
+
517
+ class FlaxGPTNeoModule(nn.Module):
518
+ config: GPTNeoConfig
519
+ dtype: jnp.dtype = jnp.float32
520
+
521
+ def setup(self):
522
+ self.embed_dim = self.config.hidden_size
523
+ embedding_init = jax.nn.initializers.normal(stddev=self.config.initializer_range)
524
+ self.wte = nn.Embed(
525
+ self.config.vocab_size,
526
+ self.embed_dim,
527
+ embedding_init=embedding_init,
528
+ )
529
+ self.wpe = nn.Embed(
530
+ self.config.max_position_embeddings,
531
+ self.embed_dim,
532
+ embedding_init=embedding_init,
533
+ )
534
+ self.dropout = nn.Dropout(rate=self.config.embed_dropout)
535
+ self.h = FlaxGPTNeoBlockCollection(self.config, dtype=self.dtype)
536
+ self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
537
+
538
+ def __call__(
539
+ self,
540
+ input_ids,
541
+ attention_mask,
542
+ position_ids,
543
+ deterministic=True,
544
+ init_cache: bool = False,
545
+ output_attentions: bool = False,
546
+ output_hidden_states: bool = False,
547
+ return_dict: bool = True,
548
+ ):
549
+ input_embeds = self.wte(input_ids.astype("i4"))
550
+ position_embeds = self.wpe(position_ids.astype("i4"))
551
+
552
+ hidden_states = input_embeds + position_embeds
553
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
554
+
555
+ outputs = self.h(
556
+ hidden_states,
557
+ attention_mask,
558
+ deterministic=deterministic,
559
+ init_cache=init_cache,
560
+ output_attentions=output_attentions,
561
+ output_hidden_states=output_hidden_states,
562
+ return_dict=return_dict,
563
+ )
564
+
565
+ hidden_states = outputs[0]
566
+ hidden_states = self.ln_f(hidden_states)
567
+
571
+ if output_hidden_states:
572
+ all_hidden_states = outputs[1] + (hidden_states,)
573
+ outputs = (hidden_states, all_hidden_states) + outputs[2:]
574
+ else:
575
+ outputs = (hidden_states,) + outputs[1:]
576
+
577
+ if not return_dict:
578
+ return tuple(v for v in outputs if v is not None)
579
+
580
+ return FlaxBaseModelOutput(
581
+ last_hidden_state=hidden_states,
582
+ hidden_states=outputs[1],
583
+ attentions=outputs[-1],
584
+ )
585
+
586
+
587
+ @add_start_docstrings(
588
+ "The bare GPTNeo Model transformer outputting raw hidden-states without any specific head on top.",
589
+ GPT_NEO_START_DOCSTRING,
590
+ )
591
+ class FlaxGPTNeoModel(FlaxGPTNeoPreTrainedModel):
592
+ module_class = FlaxGPTNeoModule
593
+
594
+
595
+ append_call_sample_docstring(FlaxGPTNeoModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutput, _CONFIG_FOR_DOC)
596
+
597
+
598
+ class FlaxGPTNeoForCausalLMModule(nn.Module):
599
+ config: GPTNeoConfig
600
+ dtype: jnp.dtype = jnp.float32
601
+
602
+ def setup(self):
603
+ self.transformer = FlaxGPTNeoModule(self.config, dtype=self.dtype)
604
+ self.lm_head = nn.Dense(
605
+ self.config.vocab_size,
606
+ use_bias=False,
607
+ dtype=self.dtype,
608
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
609
+ )
610
+
611
+ def __call__(
612
+ self,
613
+ input_ids,
614
+ attention_mask,
615
+ position_ids,
616
+ deterministic: bool = True,
617
+ init_cache: bool = False,
618
+ output_attentions: bool = False,
619
+ output_hidden_states: bool = False,
620
+ return_dict: bool = True,
621
+ ):
622
+ outputs = self.transformer(
623
+ input_ids,
624
+ attention_mask,
625
+ position_ids,
626
+ deterministic=deterministic,
627
+ init_cache=init_cache,
628
+ output_attentions=output_attentions,
629
+ output_hidden_states=output_hidden_states,
630
+ return_dict=return_dict,
631
+ )
632
+
633
+ hidden_states = outputs[0]
634
+
635
+ if self.config.tie_word_embeddings:
636
+ shared_kernel = self.transformer.variables["params"]["wte"]["embedding"].T
637
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states)
638
+ else:
639
+ lm_logits = self.lm_head(hidden_states)
640
+
641
+ if not return_dict:
642
+ return (lm_logits,) + outputs[1:]
643
+
644
+ return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
645
+
646
+
647
+ @add_start_docstrings(
648
+ """
649
+ The GPTNeo Model transformer with a language modeling head on top (linear layer with weights tied to the input
650
+ embeddings).
651
+ """,
652
+ GPT_NEO_START_DOCSTRING,
653
+ )
654
+ class FlaxGPTNeoForCausalLM(FlaxGPTNeoPreTrainedModel):
655
+ module_class = FlaxGPTNeoForCausalLMModule
656
+
657
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
658
+ # initializing the cache
659
+ batch_size, seq_length = input_ids.shape
660
+
661
+ past_key_values = self.init_cache(batch_size, max_length)
662
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
663
+ # But since GPTNeo uses a causal mask, those positions are masked anyway.
664
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation
665
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
666
+ if attention_mask is not None:
667
+ position_ids = attention_mask.cumsum(axis=-1) - 1
668
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
669
+ else:
670
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
671
+
672
+ return {
673
+ "past_key_values": past_key_values,
674
+ "attention_mask": extended_attention_mask,
675
+ "position_ids": position_ids,
676
+ }
677
+
678
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
679
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
680
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
681
+ return model_kwargs
682
+
683
+
684
+ append_call_sample_docstring(FlaxGPTNeoForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutput, _CONFIG_FOR_DOC)
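+
+ # Illustrative usage sketch (not part of the original file). The checkpoint name and prompt are
+ # assumptions; `generate()` drives `prepare_inputs_for_generation` / `update_inputs_for_generation`
+ # above so the key/value cache is reused across decoding steps.
+ #
+ # from transformers import AutoTokenizer, FlaxGPTNeoForCausalLM
+ #
+ # tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
+ # model = FlaxGPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B", from_pt=True)
+ # inputs = tokenizer("Hello, my name is", return_tensors="np")
+ # output_ids = model.generate(inputs["input_ids"], max_length=20).sequences
+ # print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))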
llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_neo/modeling_gpt_neo.py ADDED
@@ -0,0 +1,1346 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Eleuther AI and HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch GPT Neo model."""
16
+
17
+
18
+ import os
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.nn.functional as F
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
29
+ from ...modeling_outputs import (
30
+ BaseModelOutputWithPast,
31
+ BaseModelOutputWithPastAndCrossAttentions,
32
+ CausalLMOutputWithCrossAttentions,
33
+ CausalLMOutputWithPast,
34
+ QuestionAnsweringModelOutput,
35
+ SequenceClassifierOutputWithPast,
36
+ TokenClassifierOutput,
37
+ )
38
+ from ...modeling_utils import PreTrainedModel
39
+ from ...pytorch_utils import is_torch_greater_or_equal_than_1_13
40
+ from ...utils import (
41
+ add_code_sample_docstrings,
42
+ add_start_docstrings,
43
+ add_start_docstrings_to_model_forward,
44
+ is_flash_attn_2_available,
45
+ is_flash_attn_greater_or_equal_2_10,
46
+ is_torch_fx_available,
47
+ logging,
48
+ )
49
+ from .configuration_gpt_neo import GPTNeoConfig
50
+
51
+
52
+ if is_flash_attn_2_available():
53
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
54
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
55
+
56
+
57
+ # This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
58
+ # It means that the function will not be traced through and will simply appear as a node in the graph.
59
+ if is_torch_fx_available():
60
+ if not is_torch_greater_or_equal_than_1_13:
61
+ import torch.fx
62
+
63
+ _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)
64
+
65
+
66
+ logger = logging.get_logger(__name__)
67
+
68
+ _CONFIG_FOR_DOC = "GPTNeoConfig"
69
+
70
+
71
+ from ..deprecated._archive_maps import GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
72
+
73
+
74
+ _CHECKPOINT_FOR_DOC = "EleutherAI/gpt-neo-1.3B"
75
+
76
+
77
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
78
+ def _get_unpad_data(attention_mask):
79
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
80
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
81
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
82
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
83
+ return (
84
+ indices,
85
+ cu_seqlens,
86
+ max_seqlen_in_batch,
87
+ )
88
+
89
+
90
+ def load_tf_weights_in_gpt_neo(model, config, gpt_neo_checkpoint_path):
91
+ """Load tf checkpoints in a pytorch model"""
92
+ try:
93
+ import re
94
+
95
+ import tensorflow as tf
96
+ except ImportError:
97
+ logger.error(
98
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
99
+ "https://www.tensorflow.org/install/ for installation instructions."
100
+ )
101
+ raise
102
+ tf_path = os.path.abspath(gpt_neo_checkpoint_path)
103
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
104
+ # Load weights from TF model
105
+ init_vars = tf.train.list_variables(tf_path)
106
+ names = []
107
+ arrays = []
108
+ for name, shape in init_vars:
109
+ if "global_step" not in name and "adam" not in name:
110
+ array = tf.train.load_variable(tf_path, name)
111
+ array = tf.dtypes.cast(array.squeeze(), tf.float32).numpy()
112
+ name = name.replace("attn/q", "attn/attention/q_proj/w")
113
+ name = name.replace("attn/k", "attn/attention/k_proj/w")
114
+ name = name.replace("attn/v", "attn/attention/v_proj/w")
115
+ name = name.replace("attn/o", "attn/attention/out_proj/w")
116
+ name = name.replace("norm_1", "ln_1")
117
+ name = name.replace("norm_2", "ln_2")
118
+ name = name.replace("attn/compute_output_bias/o_b", "attn/attention/out_proj/b")
119
+ name = name.replace("conv1d_main/c_fc/kernel", "c_fc/w")
120
+ name = name.replace("conv1d_main/c_fc/bias", "c_fc/b")
121
+ name = name.replace("conv1d_main/c_proj/kernel", "c_proj/w")
122
+ name = name.replace("conv1d_main/c_proj/bias", "c_proj/b")
123
+
124
+ names.append(name)
125
+ arrays.append(array)
126
+
127
+ for name, array in zip(names, arrays):
128
+ name = name[5:] # skip "gpt2/"
129
+ name = name.split("/")
130
+ pointer = model.transformer
131
+ for m_name in name:
132
+ if re.fullmatch(r"[A-Za-z]+\d+", m_name):
133
+ scope_names = re.split(r"(\d+)", m_name)
134
+ else:
135
+ scope_names = [m_name]
136
+ if scope_names[0] == "w" or scope_names[0] == "g":
137
+ pointer = getattr(pointer, "weight")
138
+ elif scope_names[0] == "b":
139
+ pointer = getattr(pointer, "bias")
140
+ elif scope_names[0] == "wpe" or scope_names[0] == "wte":
141
+ pointer = getattr(pointer, scope_names[0])
142
+ pointer = getattr(pointer, "weight")
143
+ else:
144
+ pointer = getattr(pointer, scope_names[0])
145
+ if len(scope_names) >= 2:
146
+ num = int(scope_names[1])
147
+ pointer = pointer[num]
148
+
149
+ if name[-1] == "w" and name[-2] in ["out_proj", "k_proj", "q_proj", "v_proj", "c_proj", "c_fc"]:
150
+ array = array.transpose()
151
+
152
+ if name == ["wte"]:
153
+ # if vocab is padded, then trim off the padding embeddings
154
+ array = array[: config.vocab_size]
155
+
156
+ if pointer.shape != array.shape:
157
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched {name}")
158
+
159
+ print(f"Initialize PyTorch weight {name}")
160
+ pointer.data = torch.from_numpy(array)
161
+
162
+ # init the final linear layer using word embeddings
163
+ embs = model.transformer.wte.weight
164
+ lin = nn.Linear(embs.size()[1], embs.size()[0], bias=False)
165
+ lin.weight = embs
166
+ model.set_output_embeddings(lin)
167
+ return model
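+
+ # Illustrative sketch (not part of the original file): converting an original Mesh-TF GPT-Neo
+ # checkpoint. The checkpoint path is a placeholder assumption; the config must match the checkpoint.
+ #
+ # config = GPTNeoConfig.from_pretrained("EleutherAI/gpt-neo-1.3B")
+ # model = GPTNeoForCausalLM(config)
+ # model = load_tf_weights_in_gpt_neo(model, config, "/path/to/mesh_tf_checkpoint")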
168
+
169
+
170
+ class GPTNeoSelfAttention(nn.Module):
171
+ def __init__(self, config, attention_type):
172
+ super().__init__()
173
+ self.config = config
174
+
175
+ max_positions = config.max_position_embeddings
176
+ bias = torch.tril(torch.ones((max_positions, max_positions), dtype=bool)).view(
177
+ 1, 1, max_positions, max_positions
178
+ )
179
+
180
+ # local causal self attention is a sliding window where each token can only attend to the previous
181
+ # window_size tokens. This is implemented by updating the causal mask such that for each token
182
+ # all other tokens are masked except the previous window_size tokens.
183
+ if attention_type == "local":
184
+ bias = torch.bitwise_xor(bias, torch.tril(bias, -config.window_size))
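+ # For example (illustrative, not part of the original file), with max_positions=4 and
+ # window_size=2, the resulting `bias` keeps only the current token and the one before it:
+ #   [[1, 0, 0, 0],
+ #    [1, 1, 0, 0],
+ #    [0, 1, 1, 0],
+ #    [0, 0, 1, 1]]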
185
+
186
+ self.register_buffer("bias", bias, persistent=False)
187
+ self.register_buffer("masked_bias", torch.tensor(-1e9), persistent=False)
188
+
189
+ self.attn_dropout = nn.Dropout(float(config.attention_dropout))
190
+ self.resid_dropout = nn.Dropout(float(config.resid_dropout))
191
+ self.is_causal = True
192
+
193
+ self.embed_dim = config.hidden_size
194
+ self.num_heads = config.num_heads
195
+ self.head_dim = self.embed_dim // self.num_heads
196
+ if self.head_dim * self.num_heads != self.embed_dim:
197
+ raise ValueError(
198
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
199
+ f" {self.num_heads})."
200
+ )
201
+
202
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
203
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
204
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
205
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
206
+
207
+ def _split_heads(self, tensor, num_heads, attn_head_size):
208
+ """
209
+ Splits hidden_size dim into attn_head_size and num_heads
210
+ """
211
+ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
212
+ tensor = tensor.view(new_shape)
213
+ return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
214
+
215
+ def _merge_heads(self, tensor, num_heads, attn_head_size):
216
+ """
217
+ Merges attn_head_size dim and num_attn_heads dim into hidden_size
218
+ """
219
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
220
+ new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
221
+ return tensor.view(new_shape)
222
+
223
+ def _attn(self, query, key, value, attention_mask=None, head_mask=None):
224
+ # Keep the attention weights computation in fp32 to avoid overflow issues
225
+ query = query.to(torch.float32)
226
+ key = key.to(torch.float32)
227
+
228
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
229
+
230
+ query_length, key_length = query.size(-2), key.size(-2)
231
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
232
+ mask_value = torch.finfo(attn_weights.dtype).min
233
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
234
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
235
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
236
+ attn_weights = torch.where(causal_mask, attn_weights, mask_value)
237
+
238
+ if attention_mask is not None:
239
+ # Apply the attention mask
240
+ attn_weights = attn_weights + attention_mask
241
+
242
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
243
+ attn_weights = attn_weights.to(value.dtype)
244
+ attn_weights = self.attn_dropout(attn_weights)
245
+
246
+ # Mask heads if we want to
247
+ if head_mask is not None:
248
+ attn_weights = attn_weights * head_mask
249
+
250
+ attn_output = torch.matmul(attn_weights, value)
251
+
252
+ return attn_output, attn_weights
253
+
254
+ def forward(
255
+ self,
256
+ hidden_states,
257
+ attention_mask=None,
258
+ layer_past=None,
259
+ head_mask=None,
260
+ use_cache=False,
261
+ output_attentions=False,
262
+ ):
263
+ query = self.q_proj(hidden_states)
264
+ key = self.k_proj(hidden_states)
265
+ value = self.v_proj(hidden_states)
266
+
267
+ query = self._split_heads(query, self.num_heads, self.head_dim)
268
+ key = self._split_heads(key, self.num_heads, self.head_dim)
269
+ value = self._split_heads(value, self.num_heads, self.head_dim)
270
+
271
+ if layer_past is not None:
272
+ past_key = layer_past[0]
273
+ past_value = layer_past[1]
274
+ key = torch.cat((past_key, key), dim=-2)
275
+ value = torch.cat((past_value, value), dim=-2)
276
+
277
+ if use_cache is True:
278
+ present = (key, value)
279
+ else:
280
+ present = None
281
+
282
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
283
+
284
+ attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
285
+ attn_output = self.out_proj(attn_output)
286
+ attn_output = self.resid_dropout(attn_output)
287
+
288
+ outputs = (attn_output, present)
289
+ if output_attentions:
290
+ outputs += (attn_weights,)
291
+
292
+ return outputs # a, present, (attentions)
293
+
294
+
295
+ class GPTNeoFlashAttention2(GPTNeoSelfAttention):
296
+ """
297
+ GPTNeo flash attention module. This module inherits from `GPTNeoSelfAttention` as the weights of the module stay
298
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
299
+ flash attention and deal with padding tokens in case the input contains any of them.
300
+ """
301
+
302
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
303
+ def __init__(self, *args, **kwargs):
304
+ super().__init__(*args, **kwargs)
305
+
306
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
307
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
308
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
309
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
310
+
311
+ def forward(
312
+ self,
313
+ hidden_states,
314
+ attention_mask=None,
315
+ layer_past=None,
316
+ head_mask=None,
317
+ use_cache=False,
318
+ output_attentions=False,
319
+ ):
320
+ bsz, _, _ = hidden_states.size()
321
+
322
+ query = self.q_proj(hidden_states)
323
+ key = self.k_proj(hidden_states)
324
+ value = self.v_proj(hidden_states)
325
+
326
+ query = self._split_heads(query, self.num_heads, self.head_dim)
327
+ key = self._split_heads(key, self.num_heads, self.head_dim)
328
+ value = self._split_heads(value, self.num_heads, self.head_dim)
329
+
330
+ if layer_past is not None:
331
+ past_key = layer_past[0]
332
+ past_value = layer_past[1]
333
+ key = torch.cat((past_key, key), dim=-2)
334
+ value = torch.cat((past_value, value), dim=-2)
335
+
336
+ if use_cache is True:
337
+ present = (key, value)
338
+ else:
339
+ present = None
340
+
341
+ query_length = query.shape[2]
342
+ tgt_len = key.shape[2]
343
+
344
+ # Flash attention requires the input to have the shape
345
+ # batch_size x seq_length x num_heads x head_dim
346
+ query = query.transpose(1, 2).view(bsz, query_length, self.num_heads, self.head_dim)
347
+ key = key.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim)
348
+ value = value.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim)
349
+
350
+ attn_dropout = self.config.attention_dropout if self.training else 0.0
351
+
352
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
353
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
354
+ # cast them back in the correct dtype just to be sure everything works as expected.
355
+ # This might slowdown training & inference so it is recommended to not cast the LayerNorms
356
+ # in fp32. (LlamaRMSNorm handles it correctly)
357
+
358
+ if query.dtype == torch.float32:
359
+ if torch.is_autocast_enabled():
360
+ target_dtype = torch.get_autocast_gpu_dtype()
361
+ # Handle the case where the model is quantized
362
+ elif hasattr(self.config, "_pre_quantization_dtype"):
363
+ target_dtype = self.config._pre_quantization_dtype
364
+ else:
365
+ target_dtype = self.q_proj.weight.dtype
366
+
367
+ logger.warning_once(
368
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
369
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
370
+ f" {target_dtype}."
371
+ )
372
+
373
+ query = query.to(target_dtype)
374
+ key = key.to(target_dtype)
375
+ value = value.to(target_dtype)
376
+
377
+ attn_output = self._flash_attention_forward(
378
+ query, key, value, attention_mask, query_length, dropout=attn_dropout, softmax_scale=1.0
379
+ )
380
+
381
+ attn_weights_reshaped = attn_output.reshape(bsz, query_length, self.num_heads * self.head_dim)
382
+ attn_output = self.out_proj(attn_weights_reshaped)
383
+ attn_output = self.resid_dropout(attn_output)
384
+
385
+ outputs = (attn_output, present)
386
+ if output_attentions:
387
+ outputs += (attn_weights_reshaped,)
388
+
389
+ return outputs
390
+
391
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
392
+ def _flash_attention_forward(
393
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
394
+ ):
395
+ """
396
+ Calls the forward method of Flash Attention. If the input hidden states contain at least one padding token,
397
+ the input is first unpadded, then the attention scores are computed, and the final attention scores are padded back.
398
+
399
+ Args:
400
+ query_states (`torch.Tensor`):
401
+ Input query states to be passed to Flash Attention API
402
+ key_states (`torch.Tensor`):
403
+ Input key states to be passed to Flash Attention API
404
+ value_states (`torch.Tensor`):
405
+ Input value states to be passed to Flash Attention API
406
+ attention_mask (`torch.Tensor`):
407
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
408
+ position of padding tokens and 1 for the position of non-padding tokens.
409
+ dropout (`float`):
410
+ Attention dropout
411
+ softmax_scale (`float`, *optional*):
412
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
413
+ """
414
+ if not self._flash_attn_uses_top_left_mask:
415
+ causal = self.is_causal
416
+ else:
417
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
418
+ causal = self.is_causal and query_length != 1
419
+
420
+ # Contains at least one padding token in the sequence
421
+ if attention_mask is not None:
422
+ batch_size = query_states.shape[0]
423
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
424
+ query_states, key_states, value_states, attention_mask, query_length
425
+ )
426
+
427
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
428
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
429
+
430
+ attn_output_unpad = flash_attn_varlen_func(
431
+ query_states,
432
+ key_states,
433
+ value_states,
434
+ cu_seqlens_q=cu_seqlens_q,
435
+ cu_seqlens_k=cu_seqlens_k,
436
+ max_seqlen_q=max_seqlen_in_batch_q,
437
+ max_seqlen_k=max_seqlen_in_batch_k,
438
+ dropout_p=dropout,
439
+ softmax_scale=softmax_scale,
440
+ causal=causal,
441
+ )
442
+
443
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
444
+ else:
445
+ attn_output = flash_attn_func(
446
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
447
+ )
448
+
449
+ return attn_output
450
+
451
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
452
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
453
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
454
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
455
+
456
+ key_layer = index_first_axis(
457
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
458
+ )
459
+ value_layer = index_first_axis(
460
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
461
+ )
462
+ if query_length == kv_seq_len:
463
+ query_layer = index_first_axis(
464
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
465
+ )
466
+ cu_seqlens_q = cu_seqlens_k
467
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
468
+ indices_q = indices_k
469
+ elif query_length == 1:
470
+ max_seqlen_in_batch_q = 1
471
+ cu_seqlens_q = torch.arange(
472
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
473
+ ) # There is a memcpy here, that is very bad.
474
+ indices_q = cu_seqlens_q[:-1]
475
+ query_layer = query_layer.squeeze(1)
476
+ else:
477
+ # The -q_len: slice assumes left padding.
478
+ attention_mask = attention_mask[:, -query_length:]
479
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
480
+
481
+ return (
482
+ query_layer,
483
+ key_layer,
484
+ value_layer,
485
+ indices_q,
486
+ (cu_seqlens_q, cu_seqlens_k),
487
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
488
+ )
489
+
490
+
491
+ GPT_NEO_ATTENTION_CLASSES = {
492
+ "eager": GPTNeoSelfAttention,
493
+ "flash_attention_2": GPTNeoFlashAttention2,
494
+ }
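+
+ # Illustrative note (not part of the original file): the entry used is selected via
+ # `config._attn_implementation`, which can be set through the public `attn_implementation`
+ # argument of `from_pretrained`. A minimal sketch, assuming flash-attn is installed and a
+ # half-precision model on a CUDA device:
+ #
+ # import torch
+ # from transformers import GPTNeoForCausalLM
+ #
+ # model = GPTNeoForCausalLM.from_pretrained(
+ #     "EleutherAI/gpt-neo-1.3B", torch_dtype=torch.float16, attn_implementation="flash_attention_2"
+ # )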
495
+
496
+
497
+ class GPTNeoAttention(nn.Module):
498
+ def __init__(self, config, layer_id=0):
499
+ super().__init__()
500
+ self.layer_id = layer_id
501
+ self.attention_layers = config.attention_layers
502
+ self.attention_type = self.attention_layers[layer_id]
503
+
504
+ if self.attention_type in ["global", "local"]:
505
+ self.attention = GPT_NEO_ATTENTION_CLASSES[config._attn_implementation](config, self.attention_type)
506
+ else:
507
+ raise NotImplementedError(
508
+ "Only attn layer types 'global' and 'local' exist, but got `config.attention_layers`: "
509
+ f"{config.attention_layers}. Select attn layer types from ['global', 'local'] only."
510
+ )
511
+
512
+ def forward(
513
+ self,
514
+ hidden_states,
515
+ layer_past=None,
516
+ attention_mask=None,
517
+ head_mask=None,
518
+ use_cache=False,
519
+ output_attentions=False,
520
+ ):
521
+ return self.attention(
522
+ hidden_states,
523
+ attention_mask=attention_mask,
524
+ layer_past=layer_past,
525
+ head_mask=head_mask,
526
+ use_cache=use_cache,
527
+ output_attentions=output_attentions,
528
+ )
529
+
530
+
531
+ class GPTNeoMLP(nn.Module):
532
+ def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * hidden_size
533
+ super().__init__()
534
+ embed_dim = config.hidden_size
535
+ self.c_fc = nn.Linear(embed_dim, intermediate_size)
536
+ self.c_proj = nn.Linear(intermediate_size, embed_dim)
537
+ self.act = ACT2FN[config.activation_function]
538
+ self.dropout = nn.Dropout(float(config.resid_dropout))
539
+
540
+ def forward(self, hidden_states):
541
+ hidden_states = self.c_fc(hidden_states)
542
+ hidden_states = self.act(hidden_states)
543
+ hidden_states = self.c_proj(hidden_states)
544
+ hidden_states = self.dropout(hidden_states)
545
+ return hidden_states
546
+
547
+
548
+ class GPTNeoBlock(nn.Module):
549
+ def __init__(self, config, layer_id):
550
+ super().__init__()
551
+ hidden_size = config.hidden_size
552
+ inner_dim = config.intermediate_size if config.intermediate_size is not None else 4 * hidden_size
553
+ self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
554
+ self.attn = GPTNeoAttention(config, layer_id)
555
+ self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
556
+ self.mlp = GPTNeoMLP(inner_dim, config)
557
+
558
+ def forward(
559
+ self,
560
+ hidden_states,
561
+ layer_past=None,
562
+ attention_mask=None,
563
+ head_mask=None,
564
+ use_cache=False,
565
+ output_attentions=False,
566
+ ):
567
+ residual = hidden_states
568
+ hidden_states = self.ln_1(hidden_states)
569
+ attn_outputs = self.attn(
570
+ hidden_states,
571
+ layer_past=layer_past,
572
+ attention_mask=attention_mask,
573
+ head_mask=head_mask,
574
+ use_cache=use_cache,
575
+ output_attentions=output_attentions,
576
+ )
577
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
578
+ outputs = attn_outputs[1:]
579
+ # residual connection
580
+ hidden_states = attn_output + residual
581
+
582
+ residual = hidden_states
583
+ hidden_states = self.ln_2(hidden_states)
584
+ feed_forward_hidden_states = self.mlp(hidden_states)
585
+ # residual connection
586
+ hidden_states = residual + feed_forward_hidden_states
587
+
588
+ if use_cache:
589
+ outputs = (hidden_states,) + outputs
590
+ else:
591
+ outputs = (hidden_states,) + outputs[1:]
592
+
593
+ return outputs # hidden_states, present, (attentions, cross_attentions)
594
+
595
+
596
+ class GPTNeoPreTrainedModel(PreTrainedModel):
597
+ """
598
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
599
+ models.
600
+ """
601
+
602
+ config_class = GPTNeoConfig
603
+ load_tf_weights = load_tf_weights_in_gpt_neo
604
+ base_model_prefix = "transformer"
605
+ supports_gradient_checkpointing = True
606
+ _no_split_modules = ["GPTNeoBlock"]
607
+ _skip_keys_device_placement = "past_key_values"
608
+ _supports_flash_attn_2 = True
609
+
610
+ def __init__(self, *inputs, **kwargs):
611
+ super().__init__(*inputs, **kwargs)
612
+
613
+ def _init_weights(self, module):
614
+ """Initialize the weights."""
615
+ if isinstance(module, (nn.Linear,)):
616
+ # Slightly different from the TF version which uses truncated_normal for initialization
617
+ # cf https://github.com/pytorch/pytorch/pull/5617
618
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
619
+ if module.bias is not None:
620
+ module.bias.data.zero_()
621
+ elif isinstance(module, nn.Embedding):
622
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
623
+ if module.padding_idx is not None:
624
+ module.weight.data[module.padding_idx].zero_()
625
+ elif isinstance(module, nn.LayerNorm):
626
+ module.bias.data.zero_()
627
+ module.weight.data.fill_(1.0)
628
+
629
+
630
+ GPT_NEO_START_DOCSTRING = r"""
631
+
632
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
633
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
634
+ etc.)
635
+
636
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
637
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
638
+ and behavior.
639
+
640
+ Parameters:
641
+ config ([`GPTNeoConfig`]): Model configuration class with all the parameters of the model.
642
+ Initializing with a config file does not load the weights associated with the model, only the
643
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
644
+ """
645
+
646
+ GPT_NEO_INPUTS_DOCSTRING = r"""
647
+ Args:
648
+ input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
649
+ `input_ids_length` = `sequence_length` if `past_key_values` is `None` else
650
+ `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input
651
+ sequence tokens in the vocabulary.
652
+
653
+ If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
654
+ `input_ids`.
655
+
656
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
657
+ [`PreTrainedTokenizer.__call__`] for details.
658
+
659
+ [What are input IDs?](../glossary#input-ids)
660
+ past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.num_layers`):
661
+ Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
662
+ `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
663
+ their past given to this model should not be passed as `input_ids` as they have already been computed.
664
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
665
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
666
+
667
+ - 1 for tokens that are **not masked**,
668
+ - 0 for tokens that are **masked**.
669
+
670
+ [What are attention masks?](../glossary#attention-mask)
671
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
672
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
673
+ 1]`:
674
+
675
+ - 0 corresponds to a *sentence A* token,
676
+ - 1 corresponds to a *sentence B* token.
677
+
678
+ [What are token type IDs?](../glossary#token-type-ids)
679
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
680
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
681
+ config.max_position_embeddings - 1]`.
682
+
683
+ [What are position IDs?](../glossary#position-ids)
684
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
685
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
686
+
687
+ - 1 indicates the head is **not masked**,
688
+ - 0 indicates the head is **masked**.
689
+
690
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
691
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
692
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
693
+ model's internal embedding lookup matrix.
694
+
695
+ If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
696
+ `past_key_values`).
697
+ use_cache (`bool`, *optional*):
698
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
699
+ `past_key_values`).
700
+ output_attentions (`bool`, *optional*):
701
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
702
+ tensors for more detail.
703
+ output_hidden_states (`bool`, *optional*):
704
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
705
+ more detail.
706
+ return_dict (`bool`, *optional*):
707
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
708
+ """
709
+
710
+
711
+ @add_start_docstrings(
712
+ "The bare GPT Neo Model transformer outputting raw hidden-states without any specific head on top.",
713
+ GPT_NEO_START_DOCSTRING,
714
+ )
715
+ class GPTNeoModel(GPTNeoPreTrainedModel):
716
+ def __init__(self, config):
717
+ super().__init__(config)
718
+
719
+ self.embed_dim = config.hidden_size
720
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
721
+ self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
722
+ self.drop = nn.Dropout(float(config.embed_dropout))
723
+ self.h = nn.ModuleList([GPTNeoBlock(config, layer_id=i) for i in range(config.num_layers)])
724
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
725
+ self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
726
+
727
+ self.gradient_checkpointing = False
728
+ # Initialize weights and apply final processing
729
+ self.post_init()
730
+
731
+ def get_input_embeddings(self):
732
+ return self.wte
733
+
734
+ def set_input_embeddings(self, new_embeddings):
735
+ self.wte = new_embeddings
736
+
737
+ @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING)
738
+ @add_code_sample_docstrings(
739
+ checkpoint=_CHECKPOINT_FOR_DOC,
740
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
741
+ config_class=_CONFIG_FOR_DOC,
742
+ )
743
+ def forward(
744
+ self,
745
+ input_ids: Optional[torch.Tensor] = None,
746
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
747
+ attention_mask: Optional[torch.Tensor] = None,
748
+ token_type_ids: Optional[torch.Tensor] = None,
749
+ position_ids: Optional[torch.Tensor] = None,
750
+ head_mask: Optional[torch.Tensor] = None,
751
+ inputs_embeds: Optional[torch.Tensor] = None,
752
+ use_cache: Optional[bool] = None,
753
+ output_attentions: Optional[bool] = None,
754
+ output_hidden_states: Optional[bool] = None,
755
+ return_dict: Optional[bool] = None,
756
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
757
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
758
+ output_hidden_states = (
759
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
760
+ )
761
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
762
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
763
+
764
+ if input_ids is not None and inputs_embeds is not None:
765
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
766
+ elif input_ids is not None:
767
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
768
+ input_shape = input_ids.size()
769
+ input_ids = input_ids.view(-1, input_shape[-1])
770
+ elif inputs_embeds is not None:
771
+ input_shape = inputs_embeds.size()[:-1]
772
+ else:
773
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
774
+
775
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
776
+
777
+ if token_type_ids is not None:
778
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
779
+
780
+ if past_key_values is None:
781
+ past_length = 0
782
+ past_key_values = tuple([None] * len(self.h))
783
+ else:
784
+ past_length = past_key_values[0][0].size(-2)
785
+
786
+ if position_ids is None:
787
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
788
+ position_ids = position_ids.unsqueeze(0)
789
+
790
+ # Prepare head mask if needed
791
+ # 1.0 in head_mask indicate we keep the head
792
+ # attention_probs has shape bsz x num_heads x N x N
793
+ # head_mask has shape n_layer x batch x num_heads x N x N
794
+ head_mask = self.get_head_mask(head_mask, self.config.num_layers)
795
+
796
+ if inputs_embeds is None:
797
+ inputs_embeds = self.wte(input_ids)
798
+ position_embeds = self.wpe(position_ids)
799
+ hidden_states = inputs_embeds + position_embeds
800
+
801
+ # Attention mask.
802
+ if self._use_flash_attention_2:
803
+ # 2d mask is passed through the layers
804
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
805
+ else:
806
+ # 4d mask is passed through the layers
807
+ attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_length)
808
+
809
+ if token_type_ids is not None:
810
+ token_type_embeds = self.wte(token_type_ids)
811
+ hidden_states = hidden_states + token_type_embeds
812
+
813
+ hidden_states = self.drop(hidden_states)
814
+
815
+ output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)
816
+
817
+ if self.gradient_checkpointing and self.training:
818
+ if use_cache:
819
+ logger.warning_once(
820
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
821
+ )
822
+ use_cache = False
823
+
824
+ presents = () if use_cache else None
825
+ all_self_attentions = () if output_attentions else None
826
+ all_hidden_states = () if output_hidden_states else None
827
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
828
+ if output_hidden_states:
829
+ all_hidden_states = all_hidden_states + (hidden_states,)
830
+
831
+ if self.gradient_checkpointing and self.training:
832
+ outputs = self._gradient_checkpointing_func(
833
+ block.__call__,
834
+ hidden_states,
835
+ None,
836
+ attention_mask,
837
+ head_mask[i],
838
+ use_cache,
839
+ output_attentions,
840
+ )
841
+ else:
842
+ outputs = block(
843
+ hidden_states,
844
+ layer_past=layer_past,
845
+ attention_mask=attention_mask,
846
+ head_mask=head_mask[i],
847
+ use_cache=use_cache,
848
+ output_attentions=output_attentions,
849
+ )
850
+
851
+ hidden_states = outputs[0]
852
+ if use_cache is True:
853
+ presents = presents + (outputs[1],)
854
+
855
+ if output_attentions:
856
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
857
+
858
+ hidden_states = self.ln_f(hidden_states)
859
+
860
+ hidden_states = hidden_states.view(output_shape)
861
+ # Add last hidden state
862
+ if output_hidden_states:
863
+ all_hidden_states = all_hidden_states + (hidden_states,)
864
+
865
+ if not return_dict:
866
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
867
+
868
+ return BaseModelOutputWithPast(
869
+ last_hidden_state=hidden_states,
870
+ past_key_values=presents,
871
+ hidden_states=all_hidden_states,
872
+ attentions=all_self_attentions,
873
+ )
874
+
875
+
876
+ @add_start_docstrings(
877
+ """
878
+ The GPT Neo Model transformer with a language modeling head on top (linear layer with weights tied to the input
879
+ embeddings).
880
+ """,
881
+ GPT_NEO_START_DOCSTRING,
882
+ )
883
+ class GPTNeoForCausalLM(GPTNeoPreTrainedModel):
884
+ _tied_weights_keys = ["lm_head.weight"]
885
+
886
+ def __init__(self, config):
887
+ super().__init__(config)
888
+ self.transformer = GPTNeoModel(config)
889
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
890
+
891
+ # Initialize weights and apply final processing
892
+ self.post_init()
893
+
894
+ def get_output_embeddings(self):
895
+ return self.lm_head
896
+
897
+ def set_output_embeddings(self, new_embeddings):
898
+ self.lm_head = new_embeddings
899
+
900
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
901
+ token_type_ids = kwargs.get("token_type_ids", None)
902
+ # Omit tokens covered by past_key_values
903
+ if past_key_values:
904
+ past_length = past_key_values[0][0].shape[2]
905
+
906
+ # Some generation methods already pass only the last input ID
907
+ if input_ids.shape[1] > past_length:
908
+ remove_prefix_length = past_length
909
+ else:
910
+ # Default to old behavior: keep only final ID
911
+ remove_prefix_length = input_ids.shape[1] - 1
912
+
913
+ input_ids = input_ids[:, remove_prefix_length:]
914
+ if token_type_ids is not None:
915
+ token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
916
+
917
+ attention_mask = kwargs.get("attention_mask", None)
918
+ position_ids = kwargs.get("position_ids", None)
919
+
920
+ if attention_mask is not None and position_ids is None:
921
+ # create position_ids on the fly for batch generation
922
+ position_ids = attention_mask.long().cumsum(-1) - 1
923
+ position_ids.masked_fill_(attention_mask == 0, 1)
924
+ if past_key_values:
925
+ position_ids = position_ids[:, -input_ids.shape[1] :]
926
+
927
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
928
+ if inputs_embeds is not None and past_key_values is None:
929
+ model_inputs = {"inputs_embeds": inputs_embeds}
930
+ else:
931
+ model_inputs = {"input_ids": input_ids}
932
+
933
+ model_inputs.update(
934
+ {
935
+ "past_key_values": past_key_values,
936
+ "use_cache": kwargs.get("use_cache"),
937
+ "position_ids": position_ids,
938
+ "attention_mask": attention_mask,
939
+ "token_type_ids": token_type_ids,
940
+ }
941
+ )
942
+
943
+ return model_inputs
944
+
945
+ @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING)
946
+ @add_code_sample_docstrings(
947
+ checkpoint=_CHECKPOINT_FOR_DOC,
948
+ output_type=CausalLMOutputWithCrossAttentions,
949
+ config_class=_CONFIG_FOR_DOC,
950
+ )
951
+ def forward(
952
+ self,
953
+ input_ids: Optional[torch.Tensor] = None,
954
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
955
+ attention_mask: Optional[torch.Tensor] = None,
956
+ token_type_ids: Optional[torch.Tensor] = None,
957
+ position_ids: Optional[torch.Tensor] = None,
958
+ head_mask: Optional[torch.Tensor] = None,
959
+ inputs_embeds: Optional[torch.Tensor] = None,
960
+ labels: Optional[torch.Tensor] = None,
961
+ use_cache: Optional[bool] = None,
962
+ output_attentions: Optional[bool] = None,
963
+ output_hidden_states: Optional[bool] = None,
964
+ return_dict: Optional[bool] = None,
965
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
966
+ r"""
967
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
968
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
969
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
970
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
971
+ """
972
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
973
+
974
+ transformer_outputs = self.transformer(
975
+ input_ids,
976
+ past_key_values=past_key_values,
977
+ attention_mask=attention_mask,
978
+ token_type_ids=token_type_ids,
979
+ position_ids=position_ids,
980
+ head_mask=head_mask,
981
+ inputs_embeds=inputs_embeds,
982
+ use_cache=use_cache,
983
+ output_attentions=output_attentions,
984
+ output_hidden_states=output_hidden_states,
985
+ return_dict=return_dict,
986
+ )
987
+ hidden_states = transformer_outputs[0]
988
+
989
+ lm_logits = self.lm_head(hidden_states)
990
+
991
+ loss = None
992
+ if labels is not None:
993
+ # move labels to correct device to enable model parallelism
994
+ labels = labels.to(lm_logits.device)
995
+ # Compute loss in fp32 to match with mesh-tf version
996
+ # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
997
+ lm_logits = lm_logits.to(torch.float32)
998
+
999
+ # Shift so that tokens < n predict n
1000
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1001
+ shift_labels = labels[..., 1:].contiguous()
1002
+ # Flatten the tokens
1003
+ loss_fct = CrossEntropyLoss()
1004
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1005
+
1006
+ lm_logits = lm_logits.to(hidden_states.dtype)
1007
+ loss = loss.to(hidden_states.dtype)
1008
+
1009
+ if not return_dict:
1010
+ output = (lm_logits,) + transformer_outputs[1:]
1011
+ return ((loss,) + output) if loss is not None else output
1012
+
1013
+ return CausalLMOutputWithPast(
1014
+ loss=loss,
1015
+ logits=lm_logits,
1016
+ past_key_values=transformer_outputs.past_key_values,
1017
+ hidden_states=transformer_outputs.hidden_states,
1018
+ attentions=transformer_outputs.attentions,
1019
+ )
1020
+
1021
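The loss branch above implements the shifted-labels convention from the docstring: the logits at position i are scored against the token at position i + 1, which is why `labels=input_ids` works. A toy re-derivation with random tensors, purely illustrative:

import torch
from torch.nn import CrossEntropyLoss

vocab_size, seq_len = 10, 5
lm_logits = torch.randn(1, seq_len, vocab_size)
labels = torch.randint(0, vocab_size, (1, seq_len))

shift_logits = lm_logits[..., :-1, :].contiguous()    # drop the last position
shift_labels = labels[..., 1:].contiguous()           # drop the first token
loss = CrossEntropyLoss()(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(loss)   # any label set to -100 would be ignored by CrossEntropyLoss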
+ @staticmethod
1022
+ def _reorder_cache(
1023
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
1024
+ ) -> Tuple[Tuple[torch.Tensor]]:
1025
+ """
1026
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
1027
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
1028
+ beam_idx at every generation step.
1029
+ """
1030
+ return tuple(
1031
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
1032
+ for layer_past in past_key_values
1033
+ )
1034
+
1035
+
1036
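`_reorder_cache` above only permutes the batch dimension of every cached key/value tensor with `index_select`. A toy sketch with made-up shapes showing the effect of a `beam_idx`:

import torch

num_layers, batch, heads, seq, dim = 2, 3, 4, 5, 8
past_key_values = tuple(
    (torch.randn(batch, heads, seq, dim), torch.randn(batch, heads, seq, dim))
    for _ in range(num_layers)
)
beam_idx = torch.tensor([2, 0, 0])   # beam 0 now continues hypothesis 2, etc.

reordered = tuple(
    tuple(t.index_select(0, beam_idx) for t in layer_past)
    for layer_past in past_key_values
)
# Beam 0 of the reordered cache is a copy of hypothesis 2's cache.
assert torch.equal(reordered[0][0][0], past_key_values[0][0][2])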
+ @add_start_docstrings(
1037
+ """
1038
+ The GPTNeo Model transformer with a sequence classification head on top (linear layer).
1039
+
1040
+ [`GPTNeoForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1041
+ (e.g. GPT-1) do.
1042
+
1043
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1044
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1045
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1046
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1047
+ each row of the batch).
1048
+ """,
1049
+ GPT_NEO_START_DOCSTRING,
1050
+ )
1051
+ class GPTNeoForSequenceClassification(GPTNeoPreTrainedModel):
1052
+ def __init__(self, config):
1053
+ super().__init__(config)
1054
+ self.num_labels = config.num_labels
1055
+ self.transformer = GPTNeoModel(config)
1056
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1057
+
1058
+ # Initialize weights and apply final processing
1059
+ self.post_init()
1060
+
1061
+ @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING)
1062
+ @add_code_sample_docstrings(
1063
+ checkpoint=_CHECKPOINT_FOR_DOC,
1064
+ output_type=SequenceClassifierOutputWithPast,
1065
+ config_class=_CONFIG_FOR_DOC,
1066
+ )
1067
+ def forward(
1068
+ self,
1069
+ input_ids: Optional[torch.Tensor] = None,
1070
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
1071
+ attention_mask: Optional[torch.Tensor] = None,
1072
+ token_type_ids: Optional[torch.Tensor] = None,
1073
+ position_ids: Optional[torch.Tensor] = None,
1074
+ head_mask: Optional[torch.Tensor] = None,
1075
+ inputs_embeds: Optional[torch.Tensor] = None,
1076
+ labels: Optional[torch.Tensor] = None,
1077
+ use_cache: Optional[bool] = None,
1078
+ output_attentions: Optional[bool] = None,
1079
+ output_hidden_states: Optional[bool] = None,
1080
+ return_dict: Optional[bool] = None,
1081
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]:
1082
+ r"""
1083
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1084
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1085
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1086
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1087
+ """
1088
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1089
+
1090
+ transformer_outputs = self.transformer(
1091
+ input_ids,
1092
+ past_key_values=past_key_values,
1093
+ attention_mask=attention_mask,
1094
+ token_type_ids=token_type_ids,
1095
+ position_ids=position_ids,
1096
+ head_mask=head_mask,
1097
+ inputs_embeds=inputs_embeds,
1098
+ use_cache=use_cache,
1099
+ output_attentions=output_attentions,
1100
+ output_hidden_states=output_hidden_states,
1101
+ return_dict=return_dict,
1102
+ )
1103
+ hidden_states = transformer_outputs[0]
1104
+ logits = self.score(hidden_states)
1105
+
1106
+ if input_ids is not None:
1107
+ batch_size, sequence_length = input_ids.shape[:2]
1108
+ else:
1109
+ batch_size, sequence_length = inputs_embeds.shape[:2]
1110
+
1111
+ if self.config.pad_token_id is None and batch_size != 1:
1112
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1113
+ if self.config.pad_token_id is None:
1114
+ sequence_lengths = -1
1115
+ else:
1116
+ if input_ids is not None:
1117
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1118
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1119
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1120
+ sequence_lengths = sequence_lengths.to(logits.device)
1121
+ else:
1122
+ sequence_lengths = -1
1123
+ logger.warning(
1124
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1125
+ "unexpected if using padding tokens in conjunction with `inputs_embeds`."
1126
+ )
1127
+
1128
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1129
+
1130
+ loss = None
1131
+ if labels is not None:
1132
+ if self.config.problem_type is None:
1133
+ if self.num_labels == 1:
1134
+ self.config.problem_type = "regression"
1135
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1136
+ self.config.problem_type = "single_label_classification"
1137
+ else:
1138
+ self.config.problem_type = "multi_label_classification"
1139
+
1140
+ if self.config.problem_type == "regression":
1141
+ loss_fct = MSELoss()
1142
+ if self.num_labels == 1:
1143
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1144
+ else:
1145
+ loss = loss_fct(pooled_logits, labels)
1146
+ elif self.config.problem_type == "single_label_classification":
1147
+ loss_fct = CrossEntropyLoss()
1148
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1149
+ elif self.config.problem_type == "multi_label_classification":
1150
+ loss_fct = BCEWithLogitsLoss()
1151
+ loss = loss_fct(pooled_logits, labels)
1152
+ if not return_dict:
1153
+ output = (pooled_logits,) + transformer_outputs[1:]
1154
+ return ((loss,) + output) if loss is not None else output
1155
+
1156
+ return SequenceClassifierOutputWithPast(
1157
+ loss=loss,
1158
+ logits=pooled_logits,
1159
+ past_key_values=transformer_outputs.past_key_values,
1160
+ hidden_states=transformer_outputs.hidden_states,
1161
+ attentions=transformer_outputs.attentions,
1162
+ )
1163
+
1164
+
1165
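The pooling logic above selects, for each example, the logits at the last non-padding position, using an argmax-plus-modulo trick instead of reverse indexing for ONNX compatibility. A standalone sketch with toy tensors:

import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0],    # two trailing pads
                          [8, 9, 1, 2, 3]])   # no padding
logits = torch.randn(2, 5, 4)                 # (batch, seq_len, num_labels)

# argmax of the pad mask finds the first pad; -1 steps back to the last real token;
# the modulo keeps the fully unpadded row in range instead of indexing with -1.
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
pooled = logits[torch.arange(2), sequence_lengths]
print(sequence_lengths)   # tensor([2, 4])
print(pooled.shape)       # torch.Size([2, 4])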
+ @add_start_docstrings(
1166
+ """
1167
+ GPT Neo model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1168
+ Named-Entity-Recognition (NER) tasks.
1169
+ """,
1170
+ GPT_NEO_START_DOCSTRING,
1171
+ )
1172
+ class GPTNeoForTokenClassification(GPTNeoPreTrainedModel):
1173
+ def __init__(self, config):
1174
+ super().__init__(config)
1175
+ self.num_labels = config.num_labels
1176
+
1177
+ self.transformer = GPTNeoModel(config)
1178
+ self.dropout = nn.Dropout(config.classifier_dropout)
1179
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1180
+
1181
+ # Initialize weights and apply final processing
1182
+ self.post_init()
1183
+
1184
+ @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING)
1185
+ @add_code_sample_docstrings(
1186
+ checkpoint="EleutherAI/gpt-neo-125m",
1187
+ output_type=TokenClassifierOutput,
1188
+ config_class=_CONFIG_FOR_DOC,
1189
+ expected_loss=0.25,
1190
+ )
1191
+ def forward(
1192
+ self,
1193
+ input_ids: Optional[torch.LongTensor] = None,
1194
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1195
+ attention_mask: Optional[torch.FloatTensor] = None,
1196
+ token_type_ids: Optional[torch.LongTensor] = None,
1197
+ position_ids: Optional[torch.LongTensor] = None,
1198
+ head_mask: Optional[torch.FloatTensor] = None,
1199
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1200
+ labels: Optional[torch.LongTensor] = None,
1201
+ use_cache: Optional[bool] = None,
1202
+ output_attentions: Optional[bool] = None,
1203
+ output_hidden_states: Optional[bool] = None,
1204
+ return_dict: Optional[bool] = None,
1205
+ ) -> Union[Tuple, TokenClassifierOutput]:
1206
+ r"""
1207
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1208
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1209
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1210
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1211
+ """
1212
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1213
+
1214
+ transformer_outputs = self.transformer(
1215
+ input_ids,
1216
+ past_key_values=past_key_values,
1217
+ attention_mask=attention_mask,
1218
+ token_type_ids=token_type_ids,
1219
+ position_ids=position_ids,
1220
+ head_mask=head_mask,
1221
+ inputs_embeds=inputs_embeds,
1222
+ use_cache=use_cache,
1223
+ output_attentions=output_attentions,
1224
+ output_hidden_states=output_hidden_states,
1225
+ return_dict=return_dict,
1226
+ )
1227
+
1228
+ hidden_states = transformer_outputs[0]
1229
+ hidden_states = self.dropout(hidden_states)
1230
+ logits = self.classifier(hidden_states)
1231
+
1232
+ loss = None
1233
+ if labels is not None:
1234
+ labels = labels.to(logits.device)
1235
+ loss_fct = CrossEntropyLoss()
1236
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1237
+
1238
+ if not return_dict:
1239
+ output = (logits,) + transformer_outputs[2:]
1240
+ return ((loss,) + output) if loss is not None else output
1241
+
1242
+ return TokenClassifierOutput(
1243
+ loss=loss,
1244
+ logits=logits,
1245
+ hidden_states=transformer_outputs.hidden_states,
1246
+ attentions=transformer_outputs.attentions,
1247
+ )
1248
+
1249
+
1250
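A toy sketch of the token-classification head above (random tensors; the usual `-100` masking relies on `CrossEntropyLoss`'s default `ignore_index`):

import torch
from torch.nn import CrossEntropyLoss

num_labels, hidden = 3, 8
hidden_states = torch.randn(1, 4, hidden)      # stand-in for the transformer output
classifier = torch.nn.Linear(hidden, num_labels)
logits = classifier(hidden_states)             # (1, 4, num_labels): one score set per token

labels = torch.tensor([[1, 0, -100, 2]])       # third position excluded from the loss
loss = CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
print(logits.shape, loss.item())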
+ @add_start_docstrings(
1251
+ """
1252
+ The GPT-Neo Model transformer with a span classification head on top for extractive question-answering tasks like
1253
+ SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1254
+ """,
1255
+ GPT_NEO_START_DOCSTRING,
1256
+ )
1257
+ class GPTNeoForQuestionAnswering(GPTNeoPreTrainedModel):
1258
+ def __init__(self, config):
1259
+ super().__init__(config)
1260
+ self.num_labels = config.num_labels
1261
+ self.transformer = GPTNeoModel(config)
1262
+ self.qa_outputs = nn.Linear(config.hidden_size, 2)
1263
+
1264
+ # Initialize weights and apply final processing
1265
+ self.post_init()
1266
+
1267
+ @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1268
+ @add_code_sample_docstrings(
1269
+ checkpoint=_CHECKPOINT_FOR_DOC,
1270
+ output_type=QuestionAnsweringModelOutput,
1271
+ config_class=_CONFIG_FOR_DOC,
1272
+ real_checkpoint=_CHECKPOINT_FOR_DOC,
1273
+ )
1274
+ def forward(
1275
+ self,
1276
+ input_ids: Optional[torch.LongTensor] = None,
1277
+ attention_mask: Optional[torch.FloatTensor] = None,
1278
+ token_type_ids: Optional[torch.LongTensor] = None,
1279
+ position_ids: Optional[torch.LongTensor] = None,
1280
+ head_mask: Optional[torch.FloatTensor] = None,
1281
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1282
+ start_positions: Optional[torch.LongTensor] = None,
1283
+ end_positions: Optional[torch.LongTensor] = None,
1284
+ output_attentions: Optional[bool] = None,
1285
+ output_hidden_states: Optional[bool] = None,
1286
+ return_dict: Optional[bool] = None,
1287
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1288
+ r"""
1289
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1290
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1291
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1292
+ are not taken into account for computing the loss.
1293
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1294
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1295
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1296
+ are not taken into account for computing the loss.
1297
+ """
1298
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1299
+
1300
+ outputs = self.transformer(
1301
+ input_ids,
1302
+ attention_mask=attention_mask,
1303
+ token_type_ids=token_type_ids,
1304
+ position_ids=position_ids,
1305
+ head_mask=head_mask,
1306
+ inputs_embeds=inputs_embeds,
1307
+ output_attentions=output_attentions,
1308
+ output_hidden_states=output_hidden_states,
1309
+ return_dict=return_dict,
1310
+ )
1311
+
1312
+ sequence_output = outputs[0]
1313
+
1314
+ logits = self.qa_outputs(sequence_output)
1315
+ start_logits, end_logits = logits.split(1, dim=-1)
1316
+ start_logits = start_logits.squeeze(-1).contiguous()
1317
+ end_logits = end_logits.squeeze(-1).contiguous()
1318
+
1319
+ total_loss = None
1320
+ if start_positions is not None and end_positions is not None:
1321
+ # If we are on multi-GPU, splitting adds an extra dimension; squeeze it
1322
+ if len(start_positions.size()) > 1:
1323
+ start_positions = start_positions.squeeze(-1)
1324
+ if len(end_positions.size()) > 1:
1325
+ end_positions = end_positions.squeeze(-1)
1326
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1327
+ ignored_index = start_logits.size(1)
1328
+ start_positions = start_positions.clamp(0, ignored_index)
1329
+ end_positions = end_positions.clamp(0, ignored_index)
1330
+
1331
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1332
+ start_loss = loss_fct(start_logits, start_positions)
1333
+ end_loss = loss_fct(end_logits, end_positions)
1334
+ total_loss = (start_loss + end_loss) / 2
1335
+
1336
+ if not return_dict:
1337
+ output = (start_logits, end_logits) + outputs[2:]
1338
+ return ((total_loss,) + output) if total_loss is not None else output
1339
+
1340
+ return QuestionAnsweringModelOutput(
1341
+ loss=total_loss,
1342
+ start_logits=start_logits,
1343
+ end_logits=end_logits,
1344
+ hidden_states=outputs.hidden_states,
1345
+ attentions=outputs.attentions,
1346
+ )
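The head above produces one start and one end logit per token; downstream code typically turns them into an answer span by taking the argmax of each and slicing the input between them. A toy sketch with made-up logits and tokens:

import torch

start_logits = torch.tensor([[0.1, 2.0, 0.3, 0.1, 0.0]])
end_logits = torch.tensor([[0.0, 0.2, 0.1, 3.0, 0.1]])

start = int(start_logits.argmax(-1))    # 1
end = int(end_logits.argmax(-1))        # 3
tokens = ["what", "GPT", "Neo", "is", "?"]
print(tokens[start : end + 1])          # ['GPT', 'Neo', 'is']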
llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.32 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/configuration_grounding_dino.cpython-310.pyc ADDED
Binary file (12.5 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/convert_grounding_dino_to_hf.cpython-310.pyc ADDED
Binary file (16.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/image_processing_grounding_dino.cpython-310.pyc ADDED
Binary file (48.9 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/modeling_grounding_dino.cpython-310.pyc ADDED
Binary file (112 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/__pycache__/processing_grounding_dino.cpython-310.pyc ADDED
Binary file (8.07 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/modeling_tf_layoutlm.py ADDED
@@ -0,0 +1,1685 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 LayoutLM model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import math
21
+ import warnings
22
+ from typing import Dict, Optional, Tuple, Union
23
+
24
+ import numpy as np
25
+ import tensorflow as tf
26
+
27
+ from ...activations_tf import get_tf_activation
28
+ from ...modeling_tf_outputs import (
29
+ TFBaseModelOutputWithPastAndCrossAttentions,
30
+ TFBaseModelOutputWithPoolingAndCrossAttentions,
31
+ TFMaskedLMOutput,
32
+ TFQuestionAnsweringModelOutput,
33
+ TFSequenceClassifierOutput,
34
+ TFTokenClassifierOutput,
35
+ )
36
+ from ...modeling_tf_utils import (
37
+ TFMaskedLanguageModelingLoss,
38
+ TFModelInputType,
39
+ TFPreTrainedModel,
40
+ TFQuestionAnsweringLoss,
41
+ TFSequenceClassificationLoss,
42
+ TFTokenClassificationLoss,
43
+ get_initializer,
44
+ keras,
45
+ keras_serializable,
46
+ unpack_inputs,
47
+ )
48
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
49
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
50
+ from .configuration_layoutlm import LayoutLMConfig
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+ _CONFIG_FOR_DOC = "LayoutLMConfig"
56
+
57
+
58
+ from ..deprecated._archive_maps import TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
59
+
60
+
61
+ class TFLayoutLMEmbeddings(keras.layers.Layer):
62
+ """Construct the embeddings from word, position and token_type embeddings."""
63
+
64
+ def __init__(self, config: LayoutLMConfig, **kwargs):
65
+ super().__init__(**kwargs)
66
+
67
+ self.config = config
68
+ self.hidden_size = config.hidden_size
69
+ self.max_position_embeddings = config.max_position_embeddings
70
+ self.max_2d_position_embeddings = config.max_2d_position_embeddings
71
+ self.initializer_range = config.initializer_range
72
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
73
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
74
+
75
+ def build(self, input_shape=None):
76
+ with tf.name_scope("word_embeddings"):
77
+ self.weight = self.add_weight(
78
+ name="weight",
79
+ shape=[self.config.vocab_size, self.hidden_size],
80
+ initializer=get_initializer(self.initializer_range),
81
+ )
82
+
83
+ with tf.name_scope("token_type_embeddings"):
84
+ self.token_type_embeddings = self.add_weight(
85
+ name="embeddings",
86
+ shape=[self.config.type_vocab_size, self.hidden_size],
87
+ initializer=get_initializer(self.initializer_range),
88
+ )
89
+
90
+ with tf.name_scope("position_embeddings"):
91
+ self.position_embeddings = self.add_weight(
92
+ name="embeddings",
93
+ shape=[self.max_position_embeddings, self.hidden_size],
94
+ initializer=get_initializer(self.initializer_range),
95
+ )
96
+
97
+ with tf.name_scope("x_position_embeddings"):
98
+ self.x_position_embeddings = self.add_weight(
99
+ name="embeddings",
100
+ shape=[self.max_2d_position_embeddings, self.hidden_size],
101
+ initializer=get_initializer(self.initializer_range),
102
+ )
103
+
104
+ with tf.name_scope("y_position_embeddings"):
105
+ self.y_position_embeddings = self.add_weight(
106
+ name="embeddings",
107
+ shape=[self.max_2d_position_embeddings, self.hidden_size],
108
+ initializer=get_initializer(self.initializer_range),
109
+ )
110
+
111
+ with tf.name_scope("h_position_embeddings"):
112
+ self.h_position_embeddings = self.add_weight(
113
+ name="embeddings",
114
+ shape=[self.max_2d_position_embeddings, self.hidden_size],
115
+ initializer=get_initializer(self.initializer_range),
116
+ )
117
+
118
+ with tf.name_scope("w_position_embeddings"):
119
+ self.w_position_embeddings = self.add_weight(
120
+ name="embeddings",
121
+ shape=[self.max_2d_position_embeddings, self.hidden_size],
122
+ initializer=get_initializer(self.initializer_range),
123
+ )
124
+
125
+ if self.built:
126
+ return
127
+ self.built = True
128
+ if getattr(self, "LayerNorm", None) is not None:
129
+ with tf.name_scope(self.LayerNorm.name):
130
+ self.LayerNorm.build([None, None, self.config.hidden_size])
131
+
132
+ def call(
133
+ self,
134
+ input_ids: tf.Tensor = None,
135
+ bbox: tf.Tensor = None,
136
+ position_ids: tf.Tensor = None,
137
+ token_type_ids: tf.Tensor = None,
138
+ inputs_embeds: tf.Tensor = None,
139
+ training: bool = False,
140
+ ) -> tf.Tensor:
141
+ """
142
+ Applies embedding based on inputs tensor.
143
+
144
+ Returns:
145
+ final_embeddings (`tf.Tensor`): output embedding tensor.
146
+ """
147
+ assert not (input_ids is None and inputs_embeds is None)
148
+
149
+ if input_ids is not None:
150
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
151
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
152
+
153
+ input_shape = shape_list(inputs_embeds)[:-1]
154
+
155
+ if token_type_ids is None:
156
+ token_type_ids = tf.fill(dims=input_shape, value=0)
157
+
158
+ if position_ids is None:
159
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
160
+
+
164
+ if bbox is None:
165
+ bbox = tf.fill(input_shape + [4], value=0)
166
+ try:
167
+ left_position_embeddings = tf.gather(self.x_position_embeddings, bbox[:, :, 0])
168
+ upper_position_embeddings = tf.gather(self.y_position_embeddings, bbox[:, :, 1])
169
+ right_position_embeddings = tf.gather(self.x_position_embeddings, bbox[:, :, 2])
170
+ lower_position_embeddings = tf.gather(self.y_position_embeddings, bbox[:, :, 3])
171
+ except IndexError as e:
172
+ raise IndexError("The `bbox` coordinate values should be within the 0-1000 range.") from e
173
+ h_position_embeddings = tf.gather(self.h_position_embeddings, bbox[:, :, 3] - bbox[:, :, 1])
174
+ w_position_embeddings = tf.gather(self.w_position_embeddings, bbox[:, :, 2] - bbox[:, :, 0])
175
+
176
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
177
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
178
+ final_embeddings = (
179
+ inputs_embeds
180
+ + position_embeds
181
+ + token_type_embeds
182
+ + left_position_embeddings
183
+ + upper_position_embeddings
184
+ + right_position_embeddings
185
+ + lower_position_embeddings
186
+ + h_position_embeddings
187
+ + w_position_embeddings
188
+ )
189
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
190
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
191
+
192
+ return final_embeddings
193
+
194
+
195
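The embedding layer above adds six bbox-derived terms (one per box corner plus height and width) to the usual word, position and token-type embeddings. A standalone TensorFlow sketch with toy tables; the table size of 1024 and hidden size of 8 are illustrative assumptions:

import tensorflow as tf

max_2d, hidden = 1024, 8
x_table = tf.random.normal((max_2d, hidden))
y_table = tf.random.normal((max_2d, hidden))
h_table = tf.random.normal((max_2d, hidden))
w_table = tf.random.normal((max_2d, hidden))

bbox = tf.constant([[[100, 200, 300, 250]]])   # (batch=1, seq=1, [x0, y0, x1, y1])
left = tf.gather(x_table, bbox[:, :, 0])
upper = tf.gather(y_table, bbox[:, :, 1])
right = tf.gather(x_table, bbox[:, :, 2])
lower = tf.gather(y_table, bbox[:, :, 3])
h = tf.gather(h_table, bbox[:, :, 3] - bbox[:, :, 1])   # height = y1 - y0
w = tf.gather(w_table, bbox[:, :, 2] - bbox[:, :, 0])   # width  = x1 - x0
spatial = left + upper + right + lower + h + w
print(spatial.shape)   # (1, 1, 8)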
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->LayoutLM
196
+ class TFLayoutLMSelfAttention(keras.layers.Layer):
197
+ def __init__(self, config: LayoutLMConfig, **kwargs):
198
+ super().__init__(**kwargs)
199
+
200
+ if config.hidden_size % config.num_attention_heads != 0:
201
+ raise ValueError(
202
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number "
203
+ f"of attention heads ({config.num_attention_heads})"
204
+ )
205
+
206
+ self.num_attention_heads = config.num_attention_heads
207
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
208
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
209
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
210
+
211
+ self.query = keras.layers.Dense(
212
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
213
+ )
214
+ self.key = keras.layers.Dense(
215
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
216
+ )
217
+ self.value = keras.layers.Dense(
218
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
219
+ )
220
+ self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
221
+
222
+ self.is_decoder = config.is_decoder
223
+ self.config = config
224
+
225
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
226
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
227
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
228
+
229
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
230
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
231
+
232
+ def call(
233
+ self,
234
+ hidden_states: tf.Tensor,
235
+ attention_mask: tf.Tensor,
236
+ head_mask: tf.Tensor,
237
+ encoder_hidden_states: tf.Tensor,
238
+ encoder_attention_mask: tf.Tensor,
239
+ past_key_value: Tuple[tf.Tensor],
240
+ output_attentions: bool,
241
+ training: bool = False,
242
+ ) -> Tuple[tf.Tensor]:
243
+ batch_size = shape_list(hidden_states)[0]
244
+ mixed_query_layer = self.query(inputs=hidden_states)
245
+
246
+ # If this is instantiated as a cross-attention module, the keys
247
+ # and values come from an encoder; the attention mask needs to be
248
+ # such that the encoder's padding tokens are not attended to.
249
+ is_cross_attention = encoder_hidden_states is not None
250
+
251
+ if is_cross_attention and past_key_value is not None:
252
+ # reuse k,v, cross_attentions
253
+ key_layer = past_key_value[0]
254
+ value_layer = past_key_value[1]
255
+ attention_mask = encoder_attention_mask
256
+ elif is_cross_attention:
257
+ key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size)
258
+ value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size)
259
+ attention_mask = encoder_attention_mask
260
+ elif past_key_value is not None:
261
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
262
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
263
+ key_layer = tf.concat([past_key_value[0], key_layer], axis=2)
264
+ value_layer = tf.concat([past_key_value[1], value_layer], axis=2)
265
+ else:
266
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
267
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
268
+
269
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
270
+
271
+ if self.is_decoder:
272
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
273
+ # Further calls to cross_attention layer can then reuse all cross-attention
274
+ # key/value_states (first "if" case)
275
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
276
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
277
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
278
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
279
+ past_key_value = (key_layer, value_layer)
280
+
281
+ # Take the dot product between "query" and "key" to get the raw attention scores.
282
+ # (batch size, num_heads, seq_len_q, seq_len_k)
283
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
284
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
285
+ attention_scores = tf.divide(attention_scores, dk)
286
+
287
+ if attention_mask is not None:
288
+ # Apply the attention mask (precomputed for all layers in the TFLayoutLMModel call() function)
289
+ attention_scores = tf.add(attention_scores, attention_mask)
290
+
291
+ # Normalize the attention scores to probabilities.
292
+ attention_probs = stable_softmax(logits=attention_scores, axis=-1)
293
+
294
+ # This is actually dropping out entire tokens to attend to, which might
295
+ # seem a bit unusual, but is taken from the original Transformer paper.
296
+ attention_probs = self.dropout(inputs=attention_probs, training=training)
297
+
298
+ # Mask heads if we want to
299
+ if head_mask is not None:
300
+ attention_probs = tf.multiply(attention_probs, head_mask)
301
+
302
+ attention_output = tf.matmul(attention_probs, value_layer)
303
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
304
+
305
+ # (batch_size, seq_len_q, all_head_size)
306
+ attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
307
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
308
+
309
+ if self.is_decoder:
310
+ outputs = outputs + (past_key_value,)
311
+ return outputs
312
+
313
+ def build(self, input_shape=None):
314
+ if self.built:
315
+ return
316
+ self.built = True
317
+ if getattr(self, "query", None) is not None:
318
+ with tf.name_scope(self.query.name):
319
+ self.query.build([None, None, self.config.hidden_size])
320
+ if getattr(self, "key", None) is not None:
321
+ with tf.name_scope(self.key.name):
322
+ self.key.build([None, None, self.config.hidden_size])
323
+ if getattr(self, "value", None) is not None:
324
+ with tf.name_scope(self.value.name):
325
+ self.value.build([None, None, self.config.hidden_size])
326
+
327
+
328
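`transpose_for_scores` above splits the hidden dimension into heads and moves the head axis forward so the per-head dot products can be batched; the raw scores are then scaled by the square root of the head size. A shape-only sketch with toy tensors:

import math
import tensorflow as tf

batch, seq, heads, head_size = 2, 5, 4, 16
states = tf.random.normal((batch, seq, heads * head_size))

def split_heads(t):
    # (batch, seq, hidden) -> (batch, heads, seq, head_size)
    t = tf.reshape(t, (batch, -1, heads, head_size))
    return tf.transpose(t, perm=[0, 2, 1, 3])

q, k = split_heads(states), split_heads(states)
scores = tf.matmul(q, k, transpose_b=True) / math.sqrt(head_size)
print(scores.shape)   # (2, 4, 5, 5): one seq x seq attention map per head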
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->LayoutLM
329
+ class TFLayoutLMSelfOutput(keras.layers.Layer):
330
+ def __init__(self, config: LayoutLMConfig, **kwargs):
331
+ super().__init__(**kwargs)
332
+
333
+ self.dense = keras.layers.Dense(
334
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
335
+ )
336
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
337
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
338
+ self.config = config
339
+
340
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
341
+ hidden_states = self.dense(inputs=hidden_states)
342
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
343
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
344
+
345
+ return hidden_states
346
+
347
+ def build(self, input_shape=None):
348
+ if self.built:
349
+ return
350
+ self.built = True
351
+ if getattr(self, "dense", None) is not None:
352
+ with tf.name_scope(self.dense.name):
353
+ self.dense.build([None, None, self.config.hidden_size])
354
+ if getattr(self, "LayerNorm", None) is not None:
355
+ with tf.name_scope(self.LayerNorm.name):
356
+ self.LayerNorm.build([None, None, self.config.hidden_size])
357
+
358
+
359
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->LayoutLM
360
+ class TFLayoutLMAttention(keras.layers.Layer):
361
+ def __init__(self, config: LayoutLMConfig, **kwargs):
362
+ super().__init__(**kwargs)
363
+
364
+ self.self_attention = TFLayoutLMSelfAttention(config, name="self")
365
+ self.dense_output = TFLayoutLMSelfOutput(config, name="output")
366
+
367
+ def prune_heads(self, heads):
368
+ raise NotImplementedError
369
+
370
+ def call(
371
+ self,
372
+ input_tensor: tf.Tensor,
373
+ attention_mask: tf.Tensor,
374
+ head_mask: tf.Tensor,
375
+ encoder_hidden_states: tf.Tensor,
376
+ encoder_attention_mask: tf.Tensor,
377
+ past_key_value: Tuple[tf.Tensor],
378
+ output_attentions: bool,
379
+ training: bool = False,
380
+ ) -> Tuple[tf.Tensor]:
381
+ self_outputs = self.self_attention(
382
+ hidden_states=input_tensor,
383
+ attention_mask=attention_mask,
384
+ head_mask=head_mask,
385
+ encoder_hidden_states=encoder_hidden_states,
386
+ encoder_attention_mask=encoder_attention_mask,
387
+ past_key_value=past_key_value,
388
+ output_attentions=output_attentions,
389
+ training=training,
390
+ )
391
+ attention_output = self.dense_output(
392
+ hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
393
+ )
394
+ # add attentions (possibly with past_key_value) if we output them
395
+ outputs = (attention_output,) + self_outputs[1:]
396
+
397
+ return outputs
398
+
399
+ def build(self, input_shape=None):
400
+ if self.built:
401
+ return
402
+ self.built = True
403
+ if getattr(self, "self_attention", None) is not None:
404
+ with tf.name_scope(self.self_attention.name):
405
+ self.self_attention.build(None)
406
+ if getattr(self, "dense_output", None) is not None:
407
+ with tf.name_scope(self.dense_output.name):
408
+ self.dense_output.build(None)
409
+
410
+
411
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->LayoutLM
412
+ class TFLayoutLMIntermediate(keras.layers.Layer):
413
+ def __init__(self, config: LayoutLMConfig, **kwargs):
414
+ super().__init__(**kwargs)
415
+
416
+ self.dense = keras.layers.Dense(
417
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
418
+ )
419
+
420
+ if isinstance(config.hidden_act, str):
421
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
422
+ else:
423
+ self.intermediate_act_fn = config.hidden_act
424
+ self.config = config
425
+
426
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
427
+ hidden_states = self.dense(inputs=hidden_states)
428
+ hidden_states = self.intermediate_act_fn(hidden_states)
429
+
430
+ return hidden_states
431
+
432
+ def build(self, input_shape=None):
433
+ if self.built:
434
+ return
435
+ self.built = True
436
+ if getattr(self, "dense", None) is not None:
437
+ with tf.name_scope(self.dense.name):
438
+ self.dense.build([None, None, self.config.hidden_size])
439
+
440
+
441
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->LayoutLM
442
+ class TFLayoutLMOutput(keras.layers.Layer):
443
+ def __init__(self, config: LayoutLMConfig, **kwargs):
444
+ super().__init__(**kwargs)
445
+
446
+ self.dense = keras.layers.Dense(
447
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
448
+ )
449
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
450
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
451
+ self.config = config
452
+
453
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
454
+ hidden_states = self.dense(inputs=hidden_states)
455
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
456
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
457
+
458
+ return hidden_states
459
+
460
+ def build(self, input_shape=None):
461
+ if self.built:
462
+ return
463
+ self.built = True
464
+ if getattr(self, "dense", None) is not None:
465
+ with tf.name_scope(self.dense.name):
466
+ self.dense.build([None, None, self.config.intermediate_size])
467
+ if getattr(self, "LayerNorm", None) is not None:
468
+ with tf.name_scope(self.LayerNorm.name):
469
+ self.LayerNorm.build([None, None, self.config.hidden_size])
470
+
471
+
472
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->LayoutLM
473
+ class TFLayoutLMLayer(keras.layers.Layer):
474
+ def __init__(self, config: LayoutLMConfig, **kwargs):
475
+ super().__init__(**kwargs)
476
+
477
+ self.attention = TFLayoutLMAttention(config, name="attention")
478
+ self.is_decoder = config.is_decoder
479
+ self.add_cross_attention = config.add_cross_attention
480
+ if self.add_cross_attention:
481
+ if not self.is_decoder:
482
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
483
+ self.crossattention = TFLayoutLMAttention(config, name="crossattention")
484
+ self.intermediate = TFLayoutLMIntermediate(config, name="intermediate")
485
+ self.bert_output = TFLayoutLMOutput(config, name="output")
486
+
487
+ def call(
488
+ self,
489
+ hidden_states: tf.Tensor,
490
+ attention_mask: tf.Tensor,
491
+ head_mask: tf.Tensor,
492
+ encoder_hidden_states: tf.Tensor | None,
493
+ encoder_attention_mask: tf.Tensor | None,
494
+ past_key_value: Tuple[tf.Tensor] | None,
495
+ output_attentions: bool,
496
+ training: bool = False,
497
+ ) -> Tuple[tf.Tensor]:
498
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
499
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
500
+ self_attention_outputs = self.attention(
501
+ input_tensor=hidden_states,
502
+ attention_mask=attention_mask,
503
+ head_mask=head_mask,
504
+ encoder_hidden_states=None,
505
+ encoder_attention_mask=None,
506
+ past_key_value=self_attn_past_key_value,
507
+ output_attentions=output_attentions,
508
+ training=training,
509
+ )
510
+ attention_output = self_attention_outputs[0]
511
+
512
+ # if decoder, the last output is tuple of self-attn cache
513
+ if self.is_decoder:
514
+ outputs = self_attention_outputs[1:-1]
515
+ present_key_value = self_attention_outputs[-1]
516
+ else:
517
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
518
+
519
+ cross_attn_present_key_value = None
520
+ if self.is_decoder and encoder_hidden_states is not None:
521
+ if not hasattr(self, "crossattention"):
522
+ raise ValueError(
523
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
524
+ " by setting `config.add_cross_attention=True`"
525
+ )
526
+
527
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
528
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
529
+ cross_attention_outputs = self.crossattention(
530
+ input_tensor=attention_output,
531
+ attention_mask=attention_mask,
532
+ head_mask=head_mask,
533
+ encoder_hidden_states=encoder_hidden_states,
534
+ encoder_attention_mask=encoder_attention_mask,
535
+ past_key_value=cross_attn_past_key_value,
536
+ output_attentions=output_attentions,
537
+ training=training,
538
+ )
539
+ attention_output = cross_attention_outputs[0]
540
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
541
+
542
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
543
+ cross_attn_present_key_value = cross_attention_outputs[-1]
544
+ present_key_value = present_key_value + cross_attn_present_key_value
545
+
546
+ intermediate_output = self.intermediate(hidden_states=attention_output)
547
+ layer_output = self.bert_output(
548
+ hidden_states=intermediate_output, input_tensor=attention_output, training=training
549
+ )
550
+ outputs = (layer_output,) + outputs # add attentions if we output them
551
+
552
+ # if decoder, return the attn key/values as the last output
553
+ if self.is_decoder:
554
+ outputs = outputs + (present_key_value,)
555
+
556
+ return outputs
557
+
558
+ def build(self, input_shape=None):
559
+ if self.built:
560
+ return
561
+ self.built = True
562
+ if getattr(self, "attention", None) is not None:
563
+ with tf.name_scope(self.attention.name):
564
+ self.attention.build(None)
565
+ if getattr(self, "intermediate", None) is not None:
566
+ with tf.name_scope(self.intermediate.name):
567
+ self.intermediate.build(None)
568
+ if getattr(self, "bert_output", None) is not None:
569
+ with tf.name_scope(self.bert_output.name):
570
+ self.bert_output.build(None)
571
+ if getattr(self, "crossattention", None) is not None:
572
+ with tf.name_scope(self.crossattention.name):
573
+ self.crossattention.build(None)
574
+
575
+
576
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->LayoutLM
577
+ class TFLayoutLMEncoder(keras.layers.Layer):
578
+ def __init__(self, config: LayoutLMConfig, **kwargs):
579
+ super().__init__(**kwargs)
580
+ self.config = config
581
+ self.layer = [TFLayoutLMLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
582
+
583
+ def call(
584
+ self,
585
+ hidden_states: tf.Tensor,
586
+ attention_mask: tf.Tensor,
587
+ head_mask: tf.Tensor,
588
+ encoder_hidden_states: tf.Tensor | None,
589
+ encoder_attention_mask: tf.Tensor | None,
590
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None,
591
+ use_cache: Optional[bool],
592
+ output_attentions: bool,
593
+ output_hidden_states: bool,
594
+ return_dict: bool,
595
+ training: bool = False,
596
+ ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
597
+ all_hidden_states = () if output_hidden_states else None
598
+ all_attentions = () if output_attentions else None
599
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
600
+
601
+ next_decoder_cache = () if use_cache else None
602
+ for i, layer_module in enumerate(self.layer):
603
+ if output_hidden_states:
604
+ all_hidden_states = all_hidden_states + (hidden_states,)
605
+
606
+ past_key_value = past_key_values[i] if past_key_values is not None else None
607
+
608
+ layer_outputs = layer_module(
609
+ hidden_states=hidden_states,
610
+ attention_mask=attention_mask,
611
+ head_mask=head_mask[i],
612
+ encoder_hidden_states=encoder_hidden_states,
613
+ encoder_attention_mask=encoder_attention_mask,
614
+ past_key_value=past_key_value,
615
+ output_attentions=output_attentions,
616
+ training=training,
617
+ )
618
+ hidden_states = layer_outputs[0]
619
+
620
+ if use_cache:
621
+ next_decoder_cache += (layer_outputs[-1],)
622
+
623
+ if output_attentions:
624
+ all_attentions = all_attentions + (layer_outputs[1],)
625
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
626
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
627
+
628
+ # Add last layer
629
+ if output_hidden_states:
630
+ all_hidden_states = all_hidden_states + (hidden_states,)
631
+
632
+ if not return_dict:
633
+ return tuple(
634
+ v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None
635
+ )
636
+
637
+ return TFBaseModelOutputWithPastAndCrossAttentions(
638
+ last_hidden_state=hidden_states,
639
+ past_key_values=next_decoder_cache,
640
+ hidden_states=all_hidden_states,
641
+ attentions=all_attentions,
642
+ cross_attentions=all_cross_attentions,
643
+ )
644
+
645
+ def build(self, input_shape=None):
646
+ if self.built:
647
+ return
648
+ self.built = True
649
+ if getattr(self, "layer", None) is not None:
650
+ for layer in self.layer:
651
+ with tf.name_scope(layer.name):
652
+ layer.build(None)
653
+
654
+
655
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->LayoutLM
656
+ class TFLayoutLMPooler(keras.layers.Layer):
657
+ def __init__(self, config: LayoutLMConfig, **kwargs):
658
+ super().__init__(**kwargs)
659
+
660
+ self.dense = keras.layers.Dense(
661
+ units=config.hidden_size,
662
+ kernel_initializer=get_initializer(config.initializer_range),
663
+ activation="tanh",
664
+ name="dense",
665
+ )
666
+ self.config = config
667
+
668
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
669
+ # We "pool" the model by simply taking the hidden state corresponding
670
+ # to the first token.
671
+ first_token_tensor = hidden_states[:, 0]
672
+ pooled_output = self.dense(inputs=first_token_tensor)
673
+
674
+ return pooled_output
675
+
676
+ def build(self, input_shape=None):
677
+ if self.built:
678
+ return
679
+ self.built = True
680
+ if getattr(self, "dense", None) is not None:
681
+ with tf.name_scope(self.dense.name):
682
+ self.dense.build([None, None, self.config.hidden_size])
683
+
684
+
685
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->LayoutLM
686
+ class TFLayoutLMPredictionHeadTransform(keras.layers.Layer):
687
+ def __init__(self, config: LayoutLMConfig, **kwargs):
688
+ super().__init__(**kwargs)
689
+
690
+ self.dense = keras.layers.Dense(
691
+ units=config.hidden_size,
692
+ kernel_initializer=get_initializer(config.initializer_range),
693
+ name="dense",
694
+ )
695
+
696
+ if isinstance(config.hidden_act, str):
697
+ self.transform_act_fn = get_tf_activation(config.hidden_act)
698
+ else:
699
+ self.transform_act_fn = config.hidden_act
700
+
701
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
702
+ self.config = config
703
+
704
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
705
+ hidden_states = self.dense(inputs=hidden_states)
706
+ hidden_states = self.transform_act_fn(hidden_states)
707
+ hidden_states = self.LayerNorm(inputs=hidden_states)
708
+
709
+ return hidden_states
710
+
711
+ def build(self, input_shape=None):
712
+ if self.built:
713
+ return
714
+ self.built = True
715
+ if getattr(self, "dense", None) is not None:
716
+ with tf.name_scope(self.dense.name):
717
+ self.dense.build([None, None, self.config.hidden_size])
718
+ if getattr(self, "LayerNorm", None) is not None:
719
+ with tf.name_scope(self.LayerNorm.name):
720
+ self.LayerNorm.build([None, None, self.config.hidden_size])
721
+
722
+
723
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->LayoutLM
724
+ class TFLayoutLMLMPredictionHead(keras.layers.Layer):
725
+ def __init__(self, config: LayoutLMConfig, input_embeddings: keras.layers.Layer, **kwargs):
726
+ super().__init__(**kwargs)
727
+
728
+ self.config = config
729
+ self.hidden_size = config.hidden_size
730
+
731
+ self.transform = TFLayoutLMPredictionHeadTransform(config, name="transform")
732
+
733
+ # The output weights are the same as the input embeddings, but there is
734
+ # an output-only bias for each token.
735
+ self.input_embeddings = input_embeddings
736
+
737
+ def build(self, input_shape=None):
738
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
739
+
740
+ if self.built:
741
+ return
742
+ self.built = True
743
+ if getattr(self, "transform", None) is not None:
744
+ with tf.name_scope(self.transform.name):
745
+ self.transform.build(None)
746
+
747
+ def get_output_embeddings(self) -> keras.layers.Layer:
748
+ return self.input_embeddings
749
+
750
+ def set_output_embeddings(self, value: tf.Variable):
751
+ self.input_embeddings.weight = value
752
+ self.input_embeddings.vocab_size = shape_list(value)[0]
753
+
754
+ def get_bias(self) -> Dict[str, tf.Variable]:
755
+ return {"bias": self.bias}
756
+
757
+ def set_bias(self, value: tf.Variable):
758
+ self.bias = value["bias"]
759
+ self.config.vocab_size = shape_list(value["bias"])[0]
760
+
761
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
762
+ hidden_states = self.transform(hidden_states=hidden_states)
763
+ seq_length = shape_list(hidden_states)[1]
764
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
765
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
766
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
767
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
768
+
769
+ return hidden_states
770
+
771
+
772
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->LayoutLM
773
+ class TFLayoutLMMLMHead(keras.layers.Layer):
774
+ def __init__(self, config: LayoutLMConfig, input_embeddings: keras.layers.Layer, **kwargs):
775
+ super().__init__(**kwargs)
776
+
777
+ self.predictions = TFLayoutLMLMPredictionHead(config, input_embeddings, name="predictions")
778
+
779
+ def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
780
+ prediction_scores = self.predictions(hidden_states=sequence_output)
781
+
782
+ return prediction_scores
783
+
784
+ def build(self, input_shape=None):
785
+ if self.built:
786
+ return
787
+ self.built = True
788
+ if getattr(self, "predictions", None) is not None:
789
+ with tf.name_scope(self.predictions.name):
790
+ self.predictions.build(None)
791
+
792
+
793
+ @keras_serializable
794
+ class TFLayoutLMMainLayer(keras.layers.Layer):
795
+ config_class = LayoutLMConfig
796
+
797
+ def __init__(self, config: LayoutLMConfig, add_pooling_layer: bool = True, **kwargs):
798
+ super().__init__(**kwargs)
799
+
800
+ self.config = config
801
+
802
+ self.embeddings = TFLayoutLMEmbeddings(config, name="embeddings")
803
+ self.encoder = TFLayoutLMEncoder(config, name="encoder")
804
+ self.pooler = TFLayoutLMPooler(config, name="pooler") if add_pooling_layer else None
805
+
806
+ def get_input_embeddings(self) -> keras.layers.Layer:
807
+ return self.embeddings
808
+
809
+ def set_input_embeddings(self, value: tf.Variable):
810
+ self.embeddings.weight = value
811
+ self.embeddings.vocab_size = shape_list(value)[0]
812
+
813
+ def _prune_heads(self, heads_to_prune):
814
+ """
815
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
816
+ class PreTrainedModel
817
+ """
818
+ raise NotImplementedError
819
+
820
+ @unpack_inputs
821
+ def call(
822
+ self,
823
+ input_ids: TFModelInputType | None = None,
824
+ bbox: np.ndarray | tf.Tensor | None = None,
825
+ attention_mask: np.ndarray | tf.Tensor | None = None,
826
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
827
+ position_ids: np.ndarray | tf.Tensor | None = None,
828
+ head_mask: np.ndarray | tf.Tensor | None = None,
829
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
830
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
831
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
832
+ output_attentions: Optional[bool] = None,
833
+ output_hidden_states: Optional[bool] = None,
834
+ return_dict: Optional[bool] = None,
835
+ training: bool = False,
836
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
837
+ if input_ids is not None and inputs_embeds is not None:
838
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
839
+ elif input_ids is not None:
840
+ input_shape = shape_list(input_ids)
841
+ elif inputs_embeds is not None:
842
+ input_shape = shape_list(inputs_embeds)[:-1]
843
+ else:
844
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
845
+
846
+ if attention_mask is None:
847
+ attention_mask = tf.fill(dims=input_shape, value=1)
848
+
849
+ if token_type_ids is None:
850
+ token_type_ids = tf.fill(dims=input_shape, value=0)
851
+ if bbox is None:
852
+ bbox = tf.fill(dims=input_shape + [4], value=0)
853
+
854
+ embedding_output = self.embeddings(
855
+ input_ids=input_ids,
856
+ bbox=bbox,
857
+ position_ids=position_ids,
858
+ token_type_ids=token_type_ids,
859
+ inputs_embeds=inputs_embeds,
860
+ training=training,
861
+ )
862
+
863
+ # We create a 3D attention mask from a 2D tensor mask.
864
+ # Sizes are [batch_size, 1, 1, to_seq_length]
865
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
866
+ # this attention mask is simpler than the triangular masking of causal attention
867
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
868
+ extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
869
+
870
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
871
+ # masked positions, this operation will create a tensor which is 0.0 for
872
+ # positions we want to attend and -10000.0 for masked positions.
873
+ # Since we are adding it to the raw scores before the softmax, this is
874
+ # effectively the same as removing these entirely.
875
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
876
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
877
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
878
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
879
+
880
+ # Prepare head mask if needed
881
+ # 1.0 in head_mask indicate we keep the head
882
+ # attention_probs has shape bsz x n_heads x N x N
883
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
884
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
885
+ if head_mask is not None:
886
+ raise NotImplementedError
887
+ else:
888
+ head_mask = [None] * self.config.num_hidden_layers
889
+
890
+ encoder_outputs = self.encoder(
891
+ hidden_states=embedding_output,
892
+ attention_mask=extended_attention_mask,
893
+ head_mask=head_mask,
894
+ # Need to pass these required positional arguments to `Encoder`
895
+ encoder_hidden_states=encoder_hidden_states,
896
+ encoder_attention_mask=None,
897
+ past_key_values=None,
898
+ use_cache=False,
899
+ output_attentions=output_attentions,
900
+ output_hidden_states=output_hidden_states,
901
+ return_dict=return_dict,
902
+ training=training,
903
+ )
904
+
905
+ sequence_output = encoder_outputs[0]
906
+ pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None
907
+
908
+ if not return_dict:
909
+ return (
910
+ sequence_output,
911
+ pooled_output,
912
+ ) + encoder_outputs[1:]
913
+
914
+ return TFBaseModelOutputWithPoolingAndCrossAttentions(
915
+ last_hidden_state=sequence_output,
916
+ pooler_output=pooled_output,
917
+ hidden_states=encoder_outputs.hidden_states,
918
+ attentions=encoder_outputs.attentions,
919
+ cross_attentions=encoder_outputs.cross_attentions,
920
+ )
921
+
922
+ def build(self, input_shape=None):
923
+ if self.built:
924
+ return
925
+ self.built = True
926
+ if getattr(self, "embeddings", None) is not None:
927
+ with tf.name_scope(self.embeddings.name):
928
+ self.embeddings.build(None)
929
+ if getattr(self, "encoder", None) is not None:
930
+ with tf.name_scope(self.encoder.name):
931
+ self.encoder.build(None)
932
+ if getattr(self, "pooler", None) is not None:
933
+ with tf.name_scope(self.pooler.name):
934
+ self.pooler.build(None)
935
+
936
+
937
+ class TFLayoutLMPreTrainedModel(TFPreTrainedModel):
938
+ """
939
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
940
+ models.
941
+ """
942
+
943
+ config_class = LayoutLMConfig
944
+ base_model_prefix = "layoutlm"
945
+
946
+ @property
947
+ def input_signature(self):
948
+ signature = super().input_signature
949
+ signature["bbox"] = tf.TensorSpec(shape=(None, None, 4), dtype=tf.int32, name="bbox")
950
+ return signature
951
+
952
+
953
+ LAYOUTLM_START_DOCSTRING = r"""
954
+
955
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
956
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
957
+ etc.)
958
+
959
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
960
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
961
+ behavior.
962
+
963
+ <Tip>
964
+
965
+ TensorFlow models and layers in `transformers` accept two formats as input:
966
+
967
+ - having all inputs as keyword arguments (like PyTorch models), or
968
+ - having all inputs as a list, tuple or dict in the first positional argument.
969
+
970
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
971
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
972
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
973
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
974
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
975
+ positional argument:
976
+
977
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
978
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
979
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
980
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
981
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
982
+
983
+ Note that when creating models and layers with
984
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
985
+ about any of this, as you can just pass inputs like you would to any other Python function!
986
+
987
+ </Tip>
988
+
989
+ Args:
990
+ config ([`LayoutLMConfig`]): Model configuration class with all the parameters of the model.
991
+ Initializing with a config file does not load the weights associated with the model, only the
992
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
993
+ """
994
+
995
+ LAYOUTLM_INPUTS_DOCSTRING = r"""
996
+ Args:
997
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
998
+ Indices of input sequence tokens in the vocabulary.
999
+
1000
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
1001
+ [`PreTrainedTokenizer.encode`] for details.
1002
+
1003
+ [What are input IDs?](../glossary#input-ids)
1004
+ bbox (`Numpy array` or `tf.Tensor` of shape `({0}, 4)`, *optional*):
1005
+ Bounding boxes of each input sequence token. Selected in the range `[0, config.max_2d_position_embeddings-
1006
+ 1]`.
1007
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
1008
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1009
+
1010
+ - 1 for tokens that are **not masked**,
1011
+ - 0 for tokens that are **masked**.
1012
+
1013
+ [What are attention masks?](../glossary#attention-mask)
1014
+ token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
1015
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1016
+ 1]`:
1017
+
1018
+ - 0 corresponds to a *sentence A* token,
1019
+ - 1 corresponds to a *sentence B* token.
1020
+
1021
+ [What are token type IDs?](../glossary#token-type-ids)
1022
+ position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
1023
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1024
+ config.max_position_embeddings - 1]`.
1025
+
1026
+ [What are position IDs?](../glossary#position-ids)
1027
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1028
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
1029
+
1030
+ - 1 indicates the head is **not masked**,
1031
+ - 0 indicates the head is **masked**.
1032
+
1033
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
1034
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1035
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1036
+ model's internal embedding lookup matrix.
1037
+ output_attentions (`bool`, *optional*):
1038
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1039
+ tensors for more detail.
1040
+ output_hidden_states (`bool`, *optional*):
1041
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1042
+ more detail.
1043
+ return_dict (`bool`, *optional*):
1044
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1045
+ training (`bool`, *optional*, defaults to `False`):
1046
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1047
+ behaviors between training and evaluation).
1048
+ """
1049
+
1050
+
1051
+ @add_start_docstrings(
1052
+ "The bare LayoutLM Model transformer outputting raw hidden-states without any specific head on top.",
1053
+ LAYOUTLM_START_DOCSTRING,
1054
+ )
1055
+ class TFLayoutLMModel(TFLayoutLMPreTrainedModel):
1056
+ def __init__(self, config: LayoutLMConfig, *inputs, **kwargs):
1057
+ super().__init__(config, *inputs, **kwargs)
1058
+
1059
+ self.layoutlm = TFLayoutLMMainLayer(config, name="layoutlm")
1060
+
1061
+ @unpack_inputs
1062
+ @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1063
+ @replace_return_docstrings(
1064
+ output_type=TFBaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC
1065
+ )
1066
+ def call(
1067
+ self,
1068
+ input_ids: TFModelInputType | None = None,
1069
+ bbox: np.ndarray | tf.Tensor | None = None,
1070
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1071
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1072
+ position_ids: np.ndarray | tf.Tensor | None = None,
1073
+ head_mask: np.ndarray | tf.Tensor | None = None,
1074
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1075
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1076
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1077
+ output_attentions: Optional[bool] = None,
1078
+ output_hidden_states: Optional[bool] = None,
1079
+ return_dict: Optional[bool] = None,
1080
+ training: Optional[bool] = False,
1081
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
1082
+ r"""
1083
+ Returns:
1084
+
1085
+ Examples:
1086
+
1087
+ ```python
1088
+ >>> from transformers import AutoTokenizer, TFLayoutLMModel
1089
+ >>> import tensorflow as tf
1090
+
1091
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
1092
+ >>> model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
1093
+
1094
+ >>> words = ["Hello", "world"]
1095
+ >>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782]
1096
+
1097
+ >>> token_boxes = []
1098
+ >>> for word, box in zip(words, normalized_word_boxes):
1099
+ ... word_tokens = tokenizer.tokenize(word)
1100
+ ... token_boxes.extend([box] * len(word_tokens))
1101
+ >>> # add bounding boxes of cls + sep tokens
1102
+ >>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]
1103
+
1104
+ >>> encoding = tokenizer(" ".join(words), return_tensors="tf")
1105
+ >>> input_ids = encoding["input_ids"]
1106
+ >>> attention_mask = encoding["attention_mask"]
1107
+ >>> token_type_ids = encoding["token_type_ids"]
1108
+ >>> bbox = tf.convert_to_tensor([token_boxes])
1109
+
1110
+ >>> outputs = model(
1111
+ ... input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids
1112
+ ... )
1113
+
1114
+ >>> last_hidden_states = outputs.last_hidden_state
1115
+ ```"""
1116
+ outputs = self.layoutlm(
1117
+ input_ids=input_ids,
1118
+ bbox=bbox,
1119
+ attention_mask=attention_mask,
1120
+ token_type_ids=token_type_ids,
1121
+ position_ids=position_ids,
1122
+ head_mask=head_mask,
1123
+ inputs_embeds=inputs_embeds,
1124
+ output_attentions=output_attentions,
1125
+ output_hidden_states=output_hidden_states,
1126
+ return_dict=return_dict,
1127
+ training=training,
1128
+ )
1129
+
1130
+ return outputs
1131
+
1132
+ def build(self, input_shape=None):
1133
+ if self.built:
1134
+ return
1135
+ self.built = True
1136
+ if getattr(self, "layoutlm", None) is not None:
1137
+ with tf.name_scope(self.layoutlm.name):
1138
+ self.layoutlm.build(None)
1139
+
1140
+
1141
+ @add_start_docstrings("""LayoutLM Model with a `language modeling` head on top.""", LAYOUTLM_START_DOCSTRING)
1142
+ class TFLayoutLMForMaskedLM(TFLayoutLMPreTrainedModel, TFMaskedLanguageModelingLoss):
1143
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1144
+ _keys_to_ignore_on_load_unexpected = [
1145
+ r"pooler",
1146
+ r"cls.seq_relationship",
1147
+ r"cls.predictions.decoder.weight",
1148
+ r"nsp___cls",
1149
+ ]
1150
+
1151
+ def __init__(self, config: LayoutLMConfig, *inputs, **kwargs):
1152
+ super().__init__(config, *inputs, **kwargs)
1153
+
1154
+ if config.is_decoder:
1155
+ logger.warning(
1156
+ "If you want to use `TFLayoutLMForMaskedLM` make sure `config.is_decoder=False` for "
1157
+ "bi-directional self-attention."
1158
+ )
1159
+
1160
+ self.layoutlm = TFLayoutLMMainLayer(config, add_pooling_layer=True, name="layoutlm")
1161
+ self.mlm = TFLayoutLMMLMHead(config, input_embeddings=self.layoutlm.embeddings, name="mlm___cls")
1162
+
1163
+ def get_lm_head(self) -> keras.layers.Layer:
1164
+ return self.mlm.predictions
1165
+
1166
+ def get_prefix_bias_name(self) -> str:
1167
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1168
+ return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name
1169
+
1170
+ @unpack_inputs
1171
+ @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1172
+ @replace_return_docstrings(output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
1173
+ def call(
1174
+ self,
1175
+ input_ids: TFModelInputType | None = None,
1176
+ bbox: np.ndarray | tf.Tensor | None = None,
1177
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1178
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1179
+ position_ids: np.ndarray | tf.Tensor | None = None,
1180
+ head_mask: np.ndarray | tf.Tensor | None = None,
1181
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1182
+ output_attentions: Optional[bool] = None,
1183
+ output_hidden_states: Optional[bool] = None,
1184
+ return_dict: Optional[bool] = None,
1185
+ labels: np.ndarray | tf.Tensor | None = None,
1186
+ training: Optional[bool] = False,
1187
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
1188
+ r"""
1189
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1190
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1191
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1192
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1193
+
1194
+ Returns:
1195
+
1196
+ Examples:
1197
+
1198
+ ```python
1199
+ >>> from transformers import AutoTokenizer, TFLayoutLMForMaskedLM
1200
+ >>> import tensorflow as tf
1201
+
1202
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
1203
+ >>> model = TFLayoutLMForMaskedLM.from_pretrained("microsoft/layoutlm-base-uncased")
1204
+
1205
+ >>> words = ["Hello", "[MASK]"]
1206
+ >>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782]
1207
+
1208
+ >>> token_boxes = []
1209
+ >>> for word, box in zip(words, normalized_word_boxes):
1210
+ ... word_tokens = tokenizer.tokenize(word)
1211
+ ... token_boxes.extend([box] * len(word_tokens))
1212
+ >>> # add bounding boxes of cls + sep tokens
1213
+ >>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]
1214
+
1215
+ >>> encoding = tokenizer(" ".join(words), return_tensors="tf")
1216
+ >>> input_ids = encoding["input_ids"]
1217
+ >>> attention_mask = encoding["attention_mask"]
1218
+ >>> token_type_ids = encoding["token_type_ids"]
1219
+ >>> bbox = tf.convert_to_tensor([token_boxes])
1220
+
1221
+ >>> labels = tokenizer("Hello world", return_tensors="tf")["input_ids"]
1222
+
1223
+ >>> outputs = model(
1224
+ ... input_ids=input_ids,
1225
+ ... bbox=bbox,
1226
+ ... attention_mask=attention_mask,
1227
+ ... token_type_ids=token_type_ids,
1228
+ ... labels=labels,
1229
+ ... )
1230
+
1231
+ >>> loss = outputs.loss
1232
+ ```"""
1233
+ outputs = self.layoutlm(
1234
+ input_ids=input_ids,
1235
+ bbox=bbox,
1236
+ attention_mask=attention_mask,
1237
+ token_type_ids=token_type_ids,
1238
+ position_ids=position_ids,
1239
+ head_mask=head_mask,
1240
+ inputs_embeds=inputs_embeds,
1241
+ output_attentions=output_attentions,
1242
+ output_hidden_states=output_hidden_states,
1243
+ return_dict=return_dict,
1244
+ training=training,
1245
+ )
1246
+ sequence_output = outputs[0]
1247
+ prediction_scores = self.mlm(sequence_output=sequence_output, training=training)
1248
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)
1249
+
1250
+ if not return_dict:
1251
+ output = (prediction_scores,) + outputs[2:]
1252
+ return ((loss,) + output) if loss is not None else output
1253
+
1254
+ return TFMaskedLMOutput(
1255
+ loss=loss,
1256
+ logits=prediction_scores,
1257
+ hidden_states=outputs.hidden_states,
1258
+ attentions=outputs.attentions,
1259
+ )
1260
+
1261
+ def build(self, input_shape=None):
1262
+ if self.built:
1263
+ return
1264
+ self.built = True
1265
+ if getattr(self, "layoutlm", None) is not None:
1266
+ with tf.name_scope(self.layoutlm.name):
1267
+ self.layoutlm.build(None)
1268
+ if getattr(self, "mlm", None) is not None:
1269
+ with tf.name_scope(self.mlm.name):
1270
+ self.mlm.build(None)
1271
+
1272
+
1273
+ @add_start_docstrings(
1274
+ """
1275
+ LayoutLM Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1276
+ pooled output) e.g. for GLUE tasks.
1277
+ """,
1278
+ LAYOUTLM_START_DOCSTRING,
1279
+ )
1280
+ class TFLayoutLMForSequenceClassification(TFLayoutLMPreTrainedModel, TFSequenceClassificationLoss):
1281
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1282
+ _keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"nsp___cls", r"cls.predictions", r"cls.seq_relationship"]
1283
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1284
+
1285
+ def __init__(self, config: LayoutLMConfig, *inputs, **kwargs):
1286
+ super().__init__(config, *inputs, **kwargs)
1287
+
1288
+ self.num_labels = config.num_labels
1289
+
1290
+ self.layoutlm = TFLayoutLMMainLayer(config, name="layoutlm")
1291
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
1292
+ self.classifier = keras.layers.Dense(
1293
+ units=config.num_labels,
1294
+ kernel_initializer=get_initializer(config.initializer_range),
1295
+ name="classifier",
1296
+ )
1297
+ self.config = config
1298
+
1299
+ @unpack_inputs
1300
+ @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1301
+ @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
1302
+ def call(
1303
+ self,
1304
+ input_ids: TFModelInputType | None = None,
1305
+ bbox: np.ndarray | tf.Tensor | None = None,
1306
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1307
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1308
+ position_ids: np.ndarray | tf.Tensor | None = None,
1309
+ head_mask: np.ndarray | tf.Tensor | None = None,
1310
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1311
+ output_attentions: Optional[bool] = None,
1312
+ output_hidden_states: Optional[bool] = None,
1313
+ return_dict: Optional[bool] = None,
1314
+ labels: np.ndarray | tf.Tensor | None = None,
1315
+ training: Optional[bool] = False,
1316
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
1317
+ r"""
1318
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1319
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1320
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1321
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1322
+
1323
+ Returns:
1324
+
1325
+ Examples:
1326
+
1327
+ ```python
1328
+ >>> from transformers import AutoTokenizer, TFLayoutLMForSequenceClassification
1329
+ >>> import tensorflow as tf
1330
+
1331
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
1332
+ >>> model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased")
1333
+
1334
+ >>> words = ["Hello", "world"]
1335
+ >>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782]
1336
+
1337
+ >>> token_boxes = []
1338
+ >>> for word, box in zip(words, normalized_word_boxes):
1339
+ ... word_tokens = tokenizer.tokenize(word)
1340
+ ... token_boxes.extend([box] * len(word_tokens))
1341
+ >>> # add bounding boxes of cls + sep tokens
1342
+ >>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]
1343
+
1344
+ >>> encoding = tokenizer(" ".join(words), return_tensors="tf")
1345
+ >>> input_ids = encoding["input_ids"]
1346
+ >>> attention_mask = encoding["attention_mask"]
1347
+ >>> token_type_ids = encoding["token_type_ids"]
1348
+ >>> bbox = tf.convert_to_tensor([token_boxes])
1349
+ >>> sequence_label = tf.convert_to_tensor([1])
1350
+
1351
+ >>> outputs = model(
1352
+ ... input_ids=input_ids,
1353
+ ... bbox=bbox,
1354
+ ... attention_mask=attention_mask,
1355
+ ... token_type_ids=token_type_ids,
1356
+ ... labels=sequence_label,
1357
+ ... )
1358
+
1359
+ >>> loss = outputs.loss
1360
+ >>> logits = outputs.logits
1361
+ ```"""
1362
+ outputs = self.layoutlm(
1363
+ input_ids=input_ids,
1364
+ bbox=bbox,
1365
+ attention_mask=attention_mask,
1366
+ token_type_ids=token_type_ids,
1367
+ position_ids=position_ids,
1368
+ head_mask=head_mask,
1369
+ inputs_embeds=inputs_embeds,
1370
+ output_attentions=output_attentions,
1371
+ output_hidden_states=output_hidden_states,
1372
+ return_dict=return_dict,
1373
+ training=training,
1374
+ )
1375
+ pooled_output = outputs[1]
1376
+ pooled_output = self.dropout(inputs=pooled_output, training=training)
1377
+ logits = self.classifier(inputs=pooled_output)
1378
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1379
+
1380
+ if not return_dict:
1381
+ output = (logits,) + outputs[2:]
1382
+ return ((loss,) + output) if loss is not None else output
1383
+
1384
+ return TFSequenceClassifierOutput(
1385
+ loss=loss,
1386
+ logits=logits,
1387
+ hidden_states=outputs.hidden_states,
1388
+ attentions=outputs.attentions,
1389
+ )
1390
+
1391
+ def build(self, input_shape=None):
1392
+ if self.built:
1393
+ return
1394
+ self.built = True
1395
+ if getattr(self, "layoutlm", None) is not None:
1396
+ with tf.name_scope(self.layoutlm.name):
1397
+ self.layoutlm.build(None)
1398
+ if getattr(self, "classifier", None) is not None:
1399
+ with tf.name_scope(self.classifier.name):
1400
+ self.classifier.build([None, None, self.config.hidden_size])
1401
+
1402
+
1403
+ @add_start_docstrings(
1404
+ """
1405
+ LayoutLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1406
+ Named-Entity-Recognition (NER) tasks.
1407
+ """,
1408
+ LAYOUTLM_START_DOCSTRING,
1409
+ )
1410
+ class TFLayoutLMForTokenClassification(TFLayoutLMPreTrainedModel, TFTokenClassificationLoss):
1411
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1412
+ _keys_to_ignore_on_load_unexpected = [
1413
+ r"pooler",
1414
+ r"mlm___cls",
1415
+ r"nsp___cls",
1416
+ r"cls.predictions",
1417
+ r"cls.seq_relationship",
1418
+ ]
1419
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1420
+
1421
+ def __init__(self, config: LayoutLMConfig, *inputs, **kwargs):
1422
+ super().__init__(config, *inputs, **kwargs)
1423
+
1424
+ self.num_labels = config.num_labels
1425
+
1426
+ self.layoutlm = TFLayoutLMMainLayer(config, add_pooling_layer=True, name="layoutlm")
1427
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
1428
+ self.classifier = keras.layers.Dense(
1429
+ units=config.num_labels,
1430
+ kernel_initializer=get_initializer(config.initializer_range),
1431
+ name="classifier",
1432
+ )
1433
+ self.config = config
1434
+
1435
+ @unpack_inputs
1436
+ @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1437
+ @replace_return_docstrings(output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
1438
+ def call(
1439
+ self,
1440
+ input_ids: TFModelInputType | None = None,
1441
+ bbox: np.ndarray | tf.Tensor | None = None,
1442
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1443
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1444
+ position_ids: np.ndarray | tf.Tensor | None = None,
1445
+ head_mask: np.ndarray | tf.Tensor | None = None,
1446
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1447
+ output_attentions: Optional[bool] = None,
1448
+ output_hidden_states: Optional[bool] = None,
1449
+ return_dict: Optional[bool] = None,
1450
+ labels: np.ndarray | tf.Tensor | None = None,
1451
+ training: Optional[bool] = False,
1452
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1453
+ r"""
1454
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
1455
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1456
+
1457
+ Returns:
1458
+
1459
+ Examples:
1460
+
1461
+ ```python
1462
+ >>> import tensorflow as tf
1463
+ >>> from transformers import AutoTokenizer, TFLayoutLMForTokenClassification
1464
+
1465
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
1466
+ >>> model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased")
1467
+
1468
+ >>> words = ["Hello", "world"]
1469
+ >>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782]
1470
+
1471
+ >>> token_boxes = []
1472
+ >>> for word, box in zip(words, normalized_word_boxes):
1473
+ ... word_tokens = tokenizer.tokenize(word)
1474
+ ... token_boxes.extend([box] * len(word_tokens))
1475
+ >>> # add bounding boxes of cls + sep tokens
1476
+ >>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]
1477
+
1478
+ >>> encoding = tokenizer(" ".join(words), return_tensors="tf")
1479
+ >>> input_ids = encoding["input_ids"]
1480
+ >>> attention_mask = encoding["attention_mask"]
1481
+ >>> token_type_ids = encoding["token_type_ids"]
1482
+ >>> bbox = tf.convert_to_tensor([token_boxes])
1483
+ >>> token_labels = tf.convert_to_tensor([1, 1, 0, 0])
1484
+
1485
+ >>> outputs = model(
1486
+ ... input_ids=input_ids,
1487
+ ... bbox=bbox,
1488
+ ... attention_mask=attention_mask,
1489
+ ... token_type_ids=token_type_ids,
1490
+ ... labels=token_labels,
1491
+ ... )
1492
+
1493
+ >>> loss = outputs.loss
1494
+ >>> logits = outputs.logits
1495
+ ```"""
1496
+ outputs = self.layoutlm(
1497
+ input_ids=input_ids,
1498
+ bbox=bbox,
1499
+ attention_mask=attention_mask,
1500
+ token_type_ids=token_type_ids,
1501
+ position_ids=position_ids,
1502
+ head_mask=head_mask,
1503
+ inputs_embeds=inputs_embeds,
1504
+ output_attentions=output_attentions,
1505
+ output_hidden_states=output_hidden_states,
1506
+ return_dict=return_dict,
1507
+ training=training,
1508
+ )
1509
+ sequence_output = outputs[0]
1510
+ sequence_output = self.dropout(inputs=sequence_output, training=training)
1511
+ logits = self.classifier(inputs=sequence_output)
1512
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1513
+
1514
+ if not return_dict:
1515
+ output = (logits,) + outputs[2:]
1516
+ return ((loss,) + output) if loss is not None else output
1517
+
1518
+ return TFTokenClassifierOutput(
1519
+ loss=loss,
1520
+ logits=logits,
1521
+ hidden_states=outputs.hidden_states,
1522
+ attentions=outputs.attentions,
1523
+ )
1524
+
1525
+ def build(self, input_shape=None):
1526
+ if self.built:
1527
+ return
1528
+ self.built = True
1529
+ if getattr(self, "layoutlm", None) is not None:
1530
+ with tf.name_scope(self.layoutlm.name):
1531
+ self.layoutlm.build(None)
1532
+ if getattr(self, "classifier", None) is not None:
1533
+ with tf.name_scope(self.classifier.name):
1534
+ self.classifier.build([None, None, self.config.hidden_size])
1535
+
1536
+
1537
+ @add_start_docstrings(
1538
+ """
1539
+ LayoutLM Model with a span classification head on top for extractive question-answering tasks such as
1540
+ [DocVQA](https://rrc.cvc.uab.es/?ch=17) (a linear layer on top of the final hidden-states output to compute `span
1541
+ start logits` and `span end logits`).
1542
+ """,
1543
+ LAYOUTLM_START_DOCSTRING,
1544
+ )
1545
+ class TFLayoutLMForQuestionAnswering(TFLayoutLMPreTrainedModel, TFQuestionAnsweringLoss):
1546
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1547
+ _keys_to_ignore_on_load_unexpected = [
1548
+ r"pooler",
1549
+ r"mlm___cls",
1550
+ r"nsp___cls",
1551
+ r"cls.predictions",
1552
+ r"cls.seq_relationship",
1553
+ ]
1554
+
1555
+ def __init__(self, config: LayoutLMConfig, *inputs, **kwargs):
1556
+ super().__init__(config, *inputs, **kwargs)
1557
+ self.num_labels = config.num_labels
1558
+
1559
+ self.layoutlm = TFLayoutLMMainLayer(config, add_pooling_layer=True, name="layoutlm")
1560
+ self.qa_outputs = keras.layers.Dense(
1561
+ units=config.num_labels,
1562
+ kernel_initializer=get_initializer(config.initializer_range),
1563
+ name="qa_outputs",
1564
+ )
1565
+ self.config = config
1566
+
1567
+ @unpack_inputs
1568
+ @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1569
+ @replace_return_docstrings(output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
1570
+ def call(
1571
+ self,
1572
+ input_ids: TFModelInputType | None = None,
1573
+ bbox: np.ndarray | tf.Tensor | None = None,
1574
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1575
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1576
+ position_ids: np.ndarray | tf.Tensor | None = None,
1577
+ head_mask: np.ndarray | tf.Tensor | None = None,
1578
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1579
+ output_attentions: Optional[bool] = None,
1580
+ output_hidden_states: Optional[bool] = None,
1581
+ return_dict: Optional[bool] = None,
1582
+ start_positions: np.ndarray | tf.Tensor | None = None,
1583
+ end_positions: np.ndarray | tf.Tensor | None = None,
1584
+ training: Optional[bool] = False,
1585
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1586
+ r"""
1587
+ start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1588
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1589
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1590
+ are not taken into account for computing the loss.
1591
+ end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
1592
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1593
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1594
+ are not taken into account for computing the loss.
1595
+
1596
+ Returns:
1597
+
1598
+ Examples:
1599
+
1600
+ ```python
1601
+ >>> import tensorflow as tf
1602
+ >>> from transformers import AutoTokenizer, TFLayoutLMForQuestionAnswering
1603
+ >>> from datasets import load_dataset
1604
+
1605
+ >>> tokenizer = AutoTokenizer.from_pretrained("impira/layoutlm-document-qa", add_prefix_space=True)
1606
+ >>> model = TFLayoutLMForQuestionAnswering.from_pretrained("impira/layoutlm-document-qa", revision="1e3ebac")
1607
+
1608
+ >>> dataset = load_dataset("nielsr/funsd", split="train")
1609
+ >>> example = dataset[0]
1610
+ >>> question = "what's his name?"
1611
+ >>> words = example["words"]
1612
+ >>> boxes = example["bboxes"]
1613
+
1614
+ >>> encoding = tokenizer(
1615
+ ... question.split(), words, is_split_into_words=True, return_token_type_ids=True, return_tensors="tf"
1616
+ ... )
1617
+ >>> bbox = []
1618
+ >>> for i, s, w in zip(encoding.input_ids[0], encoding.sequence_ids(0), encoding.word_ids(0)):
1619
+ ... if s == 1:
1620
+ ... bbox.append(boxes[w])
1621
+ ... elif i == tokenizer.sep_token_id:
1622
+ ... bbox.append([1000] * 4)
1623
+ ... else:
1624
+ ... bbox.append([0] * 4)
1625
+ >>> encoding["bbox"] = tf.convert_to_tensor([bbox])
1626
+
1627
+ >>> word_ids = encoding.word_ids(0)
1628
+ >>> outputs = model(**encoding)
1629
+ >>> loss = outputs.loss
1630
+ >>> start_scores = outputs.start_logits
1631
+ >>> end_scores = outputs.end_logits
1632
+ >>> start, end = word_ids[tf.math.argmax(start_scores, -1)[0]], word_ids[tf.math.argmax(end_scores, -1)[0]]
1633
+ >>> print(" ".join(words[start : end + 1]))
1634
+ M. Hamann P. Harper, P. Martinez
1635
+ ```"""
1636
+
1637
+ outputs = self.layoutlm(
1638
+ input_ids=input_ids,
1639
+ bbox=bbox,
1640
+ attention_mask=attention_mask,
1641
+ token_type_ids=token_type_ids,
1642
+ position_ids=position_ids,
1643
+ head_mask=head_mask,
1644
+ inputs_embeds=inputs_embeds,
1645
+ output_attentions=output_attentions,
1646
+ output_hidden_states=output_hidden_states,
1647
+ return_dict=return_dict,
1648
+ training=training,
1649
+ )
1650
+
1651
+ sequence_output = outputs[0]
1652
+
1653
+ logits = self.qa_outputs(inputs=sequence_output)
1654
+ start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
1655
+ start_logits = tf.squeeze(input=start_logits, axis=-1)
1656
+ end_logits = tf.squeeze(input=end_logits, axis=-1)
1657
+ loss = None
1658
+
1659
+ if start_positions is not None and end_positions is not None:
1660
+ labels = {"start_position": start_positions}
1661
+ labels["end_position"] = end_positions
1662
+ loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))
1663
+
1664
+ if not return_dict:
1665
+ output = (start_logits, end_logits) + outputs[2:]
1666
+ return ((loss,) + output) if loss is not None else output
1667
+
1668
+ return TFQuestionAnsweringModelOutput(
1669
+ loss=loss,
1670
+ start_logits=start_logits,
1671
+ end_logits=end_logits,
1672
+ hidden_states=outputs.hidden_states,
1673
+ attentions=outputs.attentions,
1674
+ )
1675
+
1676
+ def build(self, input_shape=None):
1677
+ if self.built:
1678
+ return
1679
+ self.built = True
1680
+ if getattr(self, "layoutlm", None) is not None:
1681
+ with tf.name_scope(self.layoutlm.name):
1682
+ self.layoutlm.build(None)
1683
+ if getattr(self, "qa_outputs", None) is not None:
1684
+ with tf.name_scope(self.qa_outputs.name):
1685
+ self.qa_outputs.build([None, None, self.config.hidden_size])
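The main layer above turns the 2D padding mask into a broadcastable additive mask (0.0 for visible tokens, -10000.0 for padding) before it reaches the encoder. Below is a minimal standalone sketch of that masking trick in plain TensorFlow; the helper name `make_additive_mask` is purely illustrative and not part of the library.

```python
import tensorflow as tf


def make_additive_mask(attention_mask: tf.Tensor, dtype=tf.float32) -> tf.Tensor:
    """Turn a [batch, seq_len] 0/1 padding mask into a [batch, 1, 1, seq_len]
    additive mask: 0.0 where attention is allowed, -10000.0 where it is masked.
    Illustrative helper, not part of transformers."""
    shape = tf.shape(attention_mask)
    extended = tf.reshape(attention_mask, (shape[0], 1, 1, shape[1]))
    extended = tf.cast(extended, dtype)
    # (1 - mask) * -10000: kept positions contribute 0, padded positions a large
    # negative value that vanishes after the softmax over attention scores.
    return (1.0 - extended) * -10000.0


mask = tf.constant([[1, 1, 1], [1, 1, 0]])  # second sequence has one padding token
additive = make_additive_mask(mask)
print(additive[1, 0, 0].numpy())  # approximately [0., 0., -10000.]
```

Because the mask is shaped `[batch, 1, 1, seq_len]`, it broadcasts over the head and query dimensions when added to raw attention scores, which is exactly what the encoder expects.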
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/tokenization_layoutlm.py ADDED
@@ -0,0 +1,504 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Microsoft Research Asia LayoutLM Team Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization class for model LayoutLM."""
16
+
17
+ import collections
18
+ import os
19
+ import unicodedata
20
+ from typing import List, Optional, Tuple
21
+
22
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
29
+
30
+
31
+ # Copied from transformers.models.bert.tokenization_bert.load_vocab
32
+ def load_vocab(vocab_file):
33
+ """Loads a vocabulary file into a dictionary."""
34
+ vocab = collections.OrderedDict()
35
+ with open(vocab_file, "r", encoding="utf-8") as reader:
36
+ tokens = reader.readlines()
37
+ for index, token in enumerate(tokens):
38
+ token = token.rstrip("\n")
39
+ vocab[token] = index
40
+ return vocab
41
+
42
+
43
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
44
+ def whitespace_tokenize(text):
45
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
46
+ text = text.strip()
47
+ if not text:
48
+ return []
49
+ tokens = text.split()
50
+ return tokens
51
+
52
+
53
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer with Bert->LayoutLM,BERT->LayoutLM
54
+ class LayoutLMTokenizer(PreTrainedTokenizer):
55
+ r"""
56
+ Construct a LayoutLM tokenizer. Based on WordPiece.
57
+
58
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
59
+ this superclass for more information regarding those methods.
60
+
61
+ Args:
62
+ vocab_file (`str`):
63
+ File containing the vocabulary.
64
+ do_lower_case (`bool`, *optional*, defaults to `True`):
65
+ Whether or not to lowercase the input when tokenizing.
66
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
67
+ Whether or not to do basic tokenization before WordPiece.
68
+ never_split (`Iterable`, *optional*):
69
+ Collection of tokens which will never be split during tokenization. Only has an effect when
70
+ `do_basic_tokenize=True`
71
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
72
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
73
+ token instead.
74
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
75
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
76
+ sequence classification or for a text and a question for question answering. It is also used as the last
77
+ token of a sequence built with special tokens.
78
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
79
+ The token used for padding, for example when batching sequences of different lengths.
80
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
81
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
82
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
83
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
84
+ The token used for masking values. This is the token used when training this model with masked language
85
+ modeling. This is the token which the model will try to predict.
86
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
87
+ Whether or not to tokenize Chinese characters.
88
+
89
+ This should likely be deactivated for Japanese (see this
90
+ [issue](https://github.com/huggingface/transformers/issues/328)).
91
+ strip_accents (`bool`, *optional*):
92
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
93
+ value for `lowercase` (as in the original LayoutLM).
94
+ """
95
+
96
+ vocab_files_names = VOCAB_FILES_NAMES
97
+
98
+ def __init__(
99
+ self,
100
+ vocab_file,
101
+ do_lower_case=True,
102
+ do_basic_tokenize=True,
103
+ never_split=None,
104
+ unk_token="[UNK]",
105
+ sep_token="[SEP]",
106
+ pad_token="[PAD]",
107
+ cls_token="[CLS]",
108
+ mask_token="[MASK]",
109
+ tokenize_chinese_chars=True,
110
+ strip_accents=None,
111
+ **kwargs,
112
+ ):
113
+ if not os.path.isfile(vocab_file):
114
+ raise ValueError(
115
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
116
+ " model use `tokenizer = LayoutLMTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
117
+ )
118
+ self.vocab = load_vocab(vocab_file)
119
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
120
+ self.do_basic_tokenize = do_basic_tokenize
121
+ if do_basic_tokenize:
122
+ self.basic_tokenizer = BasicTokenizer(
123
+ do_lower_case=do_lower_case,
124
+ never_split=never_split,
125
+ tokenize_chinese_chars=tokenize_chinese_chars,
126
+ strip_accents=strip_accents,
127
+ )
128
+
129
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
130
+
131
+ super().__init__(
132
+ do_lower_case=do_lower_case,
133
+ do_basic_tokenize=do_basic_tokenize,
134
+ never_split=never_split,
135
+ unk_token=unk_token,
136
+ sep_token=sep_token,
137
+ pad_token=pad_token,
138
+ cls_token=cls_token,
139
+ mask_token=mask_token,
140
+ tokenize_chinese_chars=tokenize_chinese_chars,
141
+ strip_accents=strip_accents,
142
+ **kwargs,
143
+ )
144
+
145
+ @property
146
+ def do_lower_case(self):
147
+ return self.basic_tokenizer.do_lower_case
148
+
149
+ @property
150
+ def vocab_size(self):
151
+ return len(self.vocab)
152
+
153
+ def get_vocab(self):
154
+ return dict(self.vocab, **self.added_tokens_encoder)
155
+
156
+ def _tokenize(self, text, split_special_tokens=False):
157
+ split_tokens = []
158
+ if self.do_basic_tokenize:
159
+ for token in self.basic_tokenizer.tokenize(
160
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
161
+ ):
162
+ # If the token is part of the never_split set
163
+ if token in self.basic_tokenizer.never_split:
164
+ split_tokens.append(token)
165
+ else:
166
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
167
+ else:
168
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
169
+ return split_tokens
170
+
171
+ def _convert_token_to_id(self, token):
172
+ """Converts a token (str) in an id using the vocab."""
173
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
174
+
175
+ def _convert_id_to_token(self, index):
176
+ """Converts an index (integer) in a token (str) using the vocab."""
177
+ return self.ids_to_tokens.get(index, self.unk_token)
178
+
179
+ def convert_tokens_to_string(self, tokens):
180
+ """Converts a sequence of tokens (string) in a single string."""
181
+ out_string = " ".join(tokens).replace(" ##", "").strip()
182
+ return out_string
183
+
184
+ def build_inputs_with_special_tokens(
185
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
186
+ ) -> List[int]:
187
+ """
188
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
189
+ adding special tokens. A LayoutLM sequence has the following format:
190
+
191
+ - single sequence: `[CLS] X [SEP]`
192
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
193
+
194
+ Args:
195
+ token_ids_0 (`List[int]`):
196
+ List of IDs to which the special tokens will be added.
197
+ token_ids_1 (`List[int]`, *optional*):
198
+ Optional second list of IDs for sequence pairs.
199
+
200
+ Returns:
201
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
202
+ """
203
+ if token_ids_1 is None:
204
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
205
+ cls = [self.cls_token_id]
206
+ sep = [self.sep_token_id]
207
+ return cls + token_ids_0 + sep + token_ids_1 + sep
208
+
209
+ def get_special_tokens_mask(
210
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
211
+ ) -> List[int]:
212
+ """
213
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
214
+ special tokens using the tokenizer `prepare_for_model` method.
215
+
216
+ Args:
217
+ token_ids_0 (`List[int]`):
218
+ List of IDs.
219
+ token_ids_1 (`List[int]`, *optional*):
220
+ Optional second list of IDs for sequence pairs.
221
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
222
+ Whether or not the token list is already formatted with special tokens for the model.
223
+
224
+ Returns:
225
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
226
+ """
227
+
228
+ if already_has_special_tokens:
229
+ return super().get_special_tokens_mask(
230
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
231
+ )
232
+
233
+ if token_ids_1 is not None:
234
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
235
+ return [1] + ([0] * len(token_ids_0)) + [1]
236
+
237
+ def create_token_type_ids_from_sequences(
238
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
239
+ ) -> List[int]:
240
+ """
241
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A LayoutLM sequence
242
+ pair mask has the following format:
243
+
244
+ ```
245
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
246
+ | first sequence | second sequence |
247
+ ```
248
+
249
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
250
+
251
+ Args:
252
+ token_ids_0 (`List[int]`):
253
+ List of IDs.
254
+ token_ids_1 (`List[int]`, *optional*):
255
+ Optional second list of IDs for sequence pairs.
256
+
257
+ Returns:
258
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
259
+ """
260
+ sep = [self.sep_token_id]
261
+ cls = [self.cls_token_id]
262
+ if token_ids_1 is None:
263
+ return len(cls + token_ids_0 + sep) * [0]
264
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
265
+
266
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
267
+ index = 0
268
+ if os.path.isdir(save_directory):
269
+ vocab_file = os.path.join(
270
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
271
+ )
272
+ else:
273
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
274
+ with open(vocab_file, "w", encoding="utf-8") as writer:
275
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
276
+ if index != token_index:
277
+ logger.warning(
278
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
279
+ " Please check that the vocabulary is not corrupted!"
280
+ )
281
+ index = token_index
282
+ writer.write(token + "\n")
283
+ index += 1
284
+ return (vocab_file,)
285
+
286
+
287
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
288
+ class BasicTokenizer(object):
289
+ """
290
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
291
+
292
+ Args:
293
+ do_lower_case (`bool`, *optional*, defaults to `True`):
294
+ Whether or not to lowercase the input when tokenizing.
295
+ never_split (`Iterable`, *optional*):
296
+ Collection of tokens which will never be split during tokenization. Only has an effect when
297
+ `do_basic_tokenize=True`
298
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
299
+ Whether or not to tokenize Chinese characters.
300
+
301
+ This should likely be deactivated for Japanese (see this
302
+ [issue](https://github.com/huggingface/transformers/issues/328)).
303
+ strip_accents (`bool`, *optional*):
304
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
305
+ value for `lowercase` (as in the original BERT).
306
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
307
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
308
+ the full context of the words, such as contractions.
309
+ """
310
+
311
+ def __init__(
312
+ self,
313
+ do_lower_case=True,
314
+ never_split=None,
315
+ tokenize_chinese_chars=True,
316
+ strip_accents=None,
317
+ do_split_on_punc=True,
318
+ ):
319
+ if never_split is None:
320
+ never_split = []
321
+ self.do_lower_case = do_lower_case
322
+ self.never_split = set(never_split)
323
+ self.tokenize_chinese_chars = tokenize_chinese_chars
324
+ self.strip_accents = strip_accents
325
+ self.do_split_on_punc = do_split_on_punc
326
+
327
+ def tokenize(self, text, never_split=None):
328
+ """
329
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
330
+
331
+ Args:
332
+ never_split (`List[str]`, *optional*)
333
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
334
+ [`PreTrainedTokenizer.tokenize`]) List of token not to split.
335
+ """
336
+ # union() returns a new set by concatenating the two sets.
337
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
338
+ text = self._clean_text(text)
339
+
340
+ # This was added on November 1st, 2018 for the multilingual and Chinese
341
+ # models. This is also applied to the English models now, but it doesn't
342
+ # matter since the English models were not trained on any Chinese data
343
+ # and generally don't have any Chinese data in them (there are Chinese
344
+ # characters in the vocabulary because Wikipedia does have some Chinese
345
+ # words in the English Wikipedia.).
346
+ if self.tokenize_chinese_chars:
347
+ text = self._tokenize_chinese_chars(text)
348
+ # prevents treating the same character with different unicode codepoints as different characters
349
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
350
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
351
+ split_tokens = []
352
+ for token in orig_tokens:
353
+ if token not in never_split:
354
+ if self.do_lower_case:
355
+ token = token.lower()
356
+ if self.strip_accents is not False:
357
+ token = self._run_strip_accents(token)
358
+ elif self.strip_accents:
359
+ token = self._run_strip_accents(token)
360
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
361
+
362
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
363
+ return output_tokens
364
+
365
+ def _run_strip_accents(self, text):
366
+ """Strips accents from a piece of text."""
367
+ text = unicodedata.normalize("NFD", text)
368
+ output = []
369
+ for char in text:
370
+ cat = unicodedata.category(char)
371
+ if cat == "Mn":
372
+ continue
373
+ output.append(char)
374
+ return "".join(output)
375
+
376
+ def _run_split_on_punc(self, text, never_split=None):
377
+ """Splits punctuation on a piece of text."""
378
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
379
+ return [text]
380
+ chars = list(text)
381
+ i = 0
382
+ start_new_word = True
383
+ output = []
384
+ while i < len(chars):
385
+ char = chars[i]
386
+ if _is_punctuation(char):
387
+ output.append([char])
388
+ start_new_word = True
389
+ else:
390
+ if start_new_word:
391
+ output.append([])
392
+ start_new_word = False
393
+ output[-1].append(char)
394
+ i += 1
395
+
396
+ return ["".join(x) for x in output]
397
+
398
+ def _tokenize_chinese_chars(self, text):
399
+ """Adds whitespace around any CJK character."""
400
+ output = []
401
+ for char in text:
402
+ cp = ord(char)
403
+ if self._is_chinese_char(cp):
404
+ output.append(" ")
405
+ output.append(char)
406
+ output.append(" ")
407
+ else:
408
+ output.append(char)
409
+ return "".join(output)
410
+
411
+ def _is_chinese_char(self, cp):
412
+ """Checks whether CP is the codepoint of a CJK character."""
413
+ # This defines a "chinese character" as anything in the CJK Unicode block:
414
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
415
+ #
416
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
417
+ # despite its name. The modern Korean Hangul alphabet is a different block,
418
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
419
+ # space-separated words, so they are not treated specially and handled
420
+ # like all of the other languages.
421
+ if (
422
+ (cp >= 0x4E00 and cp <= 0x9FFF)
423
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
424
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
425
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
426
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
427
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
428
+ or (cp >= 0xF900 and cp <= 0xFAFF)
429
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
430
+ ): #
431
+ return True
432
+
433
+ return False
434
+
435
+ def _clean_text(self, text):
436
+ """Performs invalid character removal and whitespace cleanup on text."""
437
+ output = []
438
+ for char in text:
439
+ cp = ord(char)
440
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
441
+ continue
442
+ if _is_whitespace(char):
443
+ output.append(" ")
444
+ else:
445
+ output.append(char)
446
+ return "".join(output)
447
+
448
+
449
+ # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
450
+ class WordpieceTokenizer(object):
451
+ """Runs WordPiece tokenization."""
452
+
453
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
454
+ self.vocab = vocab
455
+ self.unk_token = unk_token
456
+ self.max_input_chars_per_word = max_input_chars_per_word
457
+
458
+ def tokenize(self, text):
459
+ """
460
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
461
+ tokenization using the given vocabulary.
462
+
463
+ For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`.
464
+
465
+ Args:
466
+ text: A single token or whitespace separated tokens. This should have
467
+ already been passed through *BasicTokenizer*.
468
+
469
+ Returns:
470
+ A list of wordpiece tokens.
471
+ """
472
+
473
+ output_tokens = []
474
+ for token in whitespace_tokenize(text):
475
+ chars = list(token)
476
+ if len(chars) > self.max_input_chars_per_word:
477
+ output_tokens.append(self.unk_token)
478
+ continue
479
+
480
+ is_bad = False
481
+ start = 0
482
+ sub_tokens = []
483
+ while start < len(chars):
484
+ end = len(chars)
485
+ cur_substr = None
486
+ while start < end:
487
+ substr = "".join(chars[start:end])
488
+ if start > 0:
489
+ substr = "##" + substr
490
+ if substr in self.vocab:
491
+ cur_substr = substr
492
+ break
493
+ end -= 1
494
+ if cur_substr is None:
495
+ is_bad = True
496
+ break
497
+ sub_tokens.append(cur_substr)
498
+ start = end
499
+
500
+ if is_bad:
501
+ output_tokens.append(self.unk_token)
502
+ else:
503
+ output_tokens.extend(sub_tokens)
504
+ return output_tokens
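The `WordpieceTokenizer` above uses a greedy longest-match-first strategy: repeatedly take the longest prefix of the remaining characters that exists in the vocabulary, marking non-initial pieces with `##`, and fall back to `[UNK]` if no prefix matches. Here is a self-contained sketch of the same idea over a tiny, purely illustrative vocabulary:

```python
from typing import List, Set


def greedy_wordpiece(word: str, vocab: Set[str], unk: str = "[UNK]") -> List[str]:
    """Greedy longest-match-first WordPiece over a single word (illustration only)."""
    pieces: List[str] = []
    start = 0
    while start < len(word):
        end = len(word)
        match = None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece  # continuation pieces carry the '##' marker
            if piece in vocab:
                match = piece
                break
            end -= 1  # shrink the candidate until it is found in the vocabulary
        if match is None:
            return [unk]  # no prefix matched: the whole word becomes [UNK]
        pieces.append(match)
        start = end
    return pieces


toy_vocab = {"un", "##aff", "##able"}  # hypothetical vocabulary for the demo
print(greedy_wordpiece("unaffable", toy_vocab))  # ['un', '##aff', '##able']
```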
llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus_x/__init__.py ADDED
@@ -0,0 +1,57 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
+}
+
+try:
+    if not is_torch_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_pegasus_x"] = [
+        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "PegasusXForConditionalGeneration",
+        "PegasusXModel",
+        "PegasusXPreTrainedModel",
+    ]
+
+
+if TYPE_CHECKING:
+    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_pegasus_x import (
+            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
+            PegasusXForConditionalGeneration,
+            PegasusXModel,
+            PegasusXPreTrainedModel,
+        )
+
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
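
The `_LazyModule` indirection above keeps `import transformers` cheap: the torch-heavy `modeling_pegasus_x` submodule is only imported the first time one of its names is looked up. A small sketch of the effect from user code, assuming a torch-enabled install:

```python
import transformers

# Submodules are resolved on first attribute lookup through _LazyModule,
# so the modeling code is not imported until these names are touched.
config_cls = transformers.PegasusXConfig
model_cls = transformers.PegasusXForConditionalGeneration

print(config_cls.__module__)  # transformers.models.pegasus_x.configuration_pegasus_x
print(model_cls.__module__)   # transformers.models.pegasus_x.modeling_pegasus_x
```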
llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus_x/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (951 Bytes).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus_x/__pycache__/configuration_pegasus_x.cpython-310.pyc ADDED
Binary file (7.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus_x/__pycache__/modeling_pegasus_x.cpython-310.pyc ADDED
Binary file (49.8 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus_x/configuration_pegasus_x.py ADDED
@@ -0,0 +1,177 @@
+# coding=utf-8
+# Copyright 2022, Google and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PEGASUS-X model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+class PegasusXConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`PegasusXModel`]. It is used to instantiate a
+    PEGASUS-X model according to the specified arguments, defining the model architecture. Instantiating a
+    configuration with the defaults will yield a similar configuration to that of the PEGASUS-X
+    [google/pegasus-x-large](https://huggingface.co/google/pegasus-x-large) architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 96103):
+            Vocabulary size of the PEGASUS-X model. Defines the number of different tokens that can be represented by
+            the `inputs_ids` passed when calling [`PegasusXModel`].
+        d_model (`int`, *optional*, defaults to 1024):
+            Dimension of the layers and the pooler layer.
+        encoder_layers (`int`, *optional*, defaults to 16):
+            Number of encoder layers.
+        decoder_layers (`int`, *optional*, defaults to 16):
+            Number of decoder layers.
+        encoder_attention_heads (`int`, *optional*, defaults to 16):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        decoder_attention_heads (`int`, *optional*, defaults to 16):
+            Number of attention heads for each attention layer in the Transformer decoder.
+        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
+            Dimension of the "intermediate" (often named feed-forward) layer in the decoder.
+        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
+            Dimension of the "intermediate" (often named feed-forward) layer in the encoder.
+        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+            `"relu"`, `"silu"` and `"gelu_new"` are supported.
+        dropout (`float`, *optional*, defaults to 0.1):
+            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+        activation_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for activations inside the fully connected layer.
+        max_position_embeddings (`int`, *optional*, defaults to 16384):
+            The maximum sequence length that this model might ever be used with. Typically set this to something large
+            just in case (e.g., 512 or 1024 or 2048).
+        init_std (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+            for more details.
+        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
+            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+            for more details.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models)
+        forced_eos_token_id (`int`, *optional*, defaults to 1):
+            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
+            `eos_token_id`.
+        num_global_tokens (`int`, *optional*, defaults to 32):
+            Number of global tokens to use for the encoder
+        block_size (`int`, *optional*, defaults to 512):
+            Block size for encoder local attention. Sequence length should be an exact multiple of block size.
+            block_size must be a multiple of 2 if stagger_local_blocks is True
+        stagger_local_blocks (`bool`, *optional*, defaults to `True`):
+            Whether to stagger every other local attention by half a block
+
+    Example:
+
+    ```python
+    >>> from transformers import PegasusXConfig, PegasusXModel
+
+    >>> # Initializing a PEGASUS google/pegasus-x-large style configuration
+    >>> configuration = PegasusXConfig()
+
+    >>> # Initializing a model (with random weights) from the google/pegasus-x-large style configuration
+    >>> model = PegasusXModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "pegasus_x"
+    keys_to_ignore_at_inference = ["past_key_values"]
+    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
+
+    def __init__(
+        self,
+        vocab_size=96103,
+        max_position_embeddings=16384,
+        encoder_layers=16,
+        encoder_ffn_dim=4096,
+        encoder_attention_heads=16,
+        decoder_layers=16,
+        decoder_ffn_dim=4096,
+        decoder_attention_heads=16,
+        encoder_layerdrop=0.0,
+        decoder_layerdrop=0.0,
+        use_cache=True,
+        is_encoder_decoder=True,
+        activation_function="gelu",
+        d_model=1024,
+        dropout=0.1,
+        attention_dropout=0.0,
+        activation_dropout=0.0,
+        init_std=0.02,
+        decoder_start_token_id=0,
+        scale_embedding=True,
+        pad_token_id=0,
+        eos_token_id=1,
+        forced_eos_token_id=1,
+        num_global_tokens=32,
+        block_size=512,
+        stagger_local_blocks=True,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.d_model = d_model
+        self.encoder_ffn_dim = encoder_ffn_dim
+        self.encoder_layers = encoder_layers
+        self.encoder_attention_heads = encoder_attention_heads
+        self.decoder_ffn_dim = decoder_ffn_dim
+        self.decoder_layers = decoder_layers
+        self.decoder_attention_heads = decoder_attention_heads
+        self.dropout = dropout
+        self.attention_dropout = attention_dropout
+        self.activation_dropout = activation_dropout
+        self.activation_function = activation_function
+        self.init_std = init_std
+        self.encoder_layerdrop = encoder_layerdrop
+        self.decoder_layerdrop = decoder_layerdrop
+        self.use_cache = use_cache
+        self.num_hidden_layers = encoder_layers
+        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
+
+        self.num_global_tokens = num_global_tokens
+        self.block_size = block_size
+        self.stagger_local_blocks = stagger_local_blocks
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            eos_token_id=eos_token_id,
+            is_encoder_decoder=is_encoder_decoder,
+            decoder_start_token_id=decoder_start_token_id,
+            forced_eos_token_id=forced_eos_token_id,
+            **kwargs,
+        )
+
+    @property
+    def num_attention_heads(self) -> int:
+        return self.encoder_attention_heads
+
+    @property
+    def hidden_size(self) -> int:
+        return self.d_model
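
As the docstring above notes, the encoder pads the input to a multiple of `block_size`, prepends `num_global_tokens` learned global embeddings, and, with `stagger_local_blocks=True`, shifts every other layer's blocks by half a block (hence `block_size` must be even). A sketch of a deliberately scaled-down configuration; the sizes below are arbitrary and chosen only to keep the example light:

```python
from transformers import PegasusXConfig, PegasusXForConditionalGeneration

# Arbitrary small sizes for illustration; the released checkpoints use the defaults
# defined in the class above (d_model=1024, 16 layers, block_size=512, ...).
config = PegasusXConfig(
    d_model=64,
    encoder_layers=2,
    decoder_layers=2,
    encoder_attention_heads=4,
    decoder_attention_heads=4,
    encoder_ffn_dim=128,
    decoder_ffn_dim=128,
    block_size=16,          # must be even when stagger_local_blocks=True
    num_global_tokens=8,
    stagger_local_blocks=True,
)
model = PegasusXForConditionalGeneration(config)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")
```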
llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus_x/modeling_pegasus_x.py ADDED
@@ -0,0 +1,1627 @@
1
+ # coding=utf-8
2
+ # Copyright 2022, Google and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch PEGASUS-X model."""
16
+
17
+ import dataclasses
18
+ import math
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import CrossEntropyLoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
29
+ from ...modeling_outputs import (
30
+ BaseModelOutput,
31
+ BaseModelOutputWithPastAndCrossAttentions,
32
+ Seq2SeqLMOutput,
33
+ Seq2SeqModelOutput,
34
+ )
35
+ from ...modeling_utils import PreTrainedModel
36
+ from ...utils import (
37
+ add_end_docstrings,
38
+ add_start_docstrings,
39
+ add_start_docstrings_to_model_forward,
40
+ logging,
41
+ replace_return_docstrings,
42
+ )
43
+ from .configuration_pegasus_x import PegasusXConfig
44
+
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+ _CHECKPOINT_FOR_DOC = "google/pegasus-x-base"
49
+ _CONFIG_FOR_DOC = "PegasusXConfig"
50
+
51
+
52
+ from ..deprecated._archive_maps import PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
53
+
54
+
55
+ @dataclasses.dataclass
56
+ class DimensionInfo:
57
+ """Wrapper for dimension info."""
58
+
59
+ batch_size: int # batch size
60
+ seq_len: int # token length
61
+ block_size: int # block size
62
+ num_heads: int # num heads
63
+ hidden_dim: int # hidden dim
64
+ dim_per_head: int # dim per head
65
+ num_blocks: int # num blocks
66
+ global_len: int # global length
67
+ padded_seq_len: int # padded token seq length
68
+
69
+ # Note: Compared to the original Flax implementation, we will pad the token representations to
70
+ # a multiple of block size at the start of the encoder layers, so T=P always.
71
+
72
+
73
+ # Copied from transformers.models.bart.modeling_bart.shift_tokens_right
74
+ def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
75
+ """
76
+ Shift input ids one token to the right.
77
+ """
78
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
79
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
80
+ shifted_input_ids[:, 0] = decoder_start_token_id
81
+
82
+ if pad_token_id is None:
83
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
84
+ # replace possible -100 values in labels by `pad_token_id`
85
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
86
+
87
+ return shifted_input_ids
88
+
89
+
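
A quick trace of `shift_tokens_right` above: it builds decoder inputs from labels by dropping the last position, prepending `decoder_start_token_id`, and replacing any `-100` (ignored label positions) with `pad_token_id`. A minimal check, assuming the function is imported from this module:

```python
import torch
from transformers.models.pegasus_x.modeling_pegasus_x import shift_tokens_right

labels = torch.tensor([[42, -100, 44, 1]])
# decoder_start_token_id=0 and pad_token_id=0 match the PegasusXConfig defaults
print(shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=0))
# tensor([[ 0, 42,  0, 44]])
```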
90
+ class PegasusXSinusoidalPositionalEmbedding(nn.Module):
91
+ """This module produces sinusoidal positional embeddings of any length."""
92
+
93
+ def __init__(self, embed_dim, max_scale: int = 10000.0):
94
+ super().__init__()
95
+ self.embed_dim = embed_dim
96
+ self.max_scale = max_scale
97
+
98
+ @torch.no_grad()
99
+ def forward(self, input_embeds: torch.Tensor, past_key_values_length: int = 0) -> torch.Tensor:
100
+ """`input_embeds` is expected to be of shape [bsz x seqlen x embed_dim]."""
101
+ batch_size, seq_len = input_embeds.shape[:2]
102
+ positions = torch.arange(
103
+ past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=input_embeds.device
104
+ )[:, None]
105
+ pe = torch.zeros((seq_len, self.embed_dim), device=input_embeds.device, dtype=input_embeds.dtype)
106
+ half_d_feature = self.embed_dim // 2
107
+ div_term = torch.exp(
108
+ torch.arange(half_d_feature, device=input_embeds.device, dtype=torch.int64).type_as(input_embeds)
109
+ * -(np.log(float(self.max_scale)) / (half_d_feature - 1))
110
+ )
111
+ pe[:, :half_d_feature] = torch.sin(positions * div_term)
112
+ pe[:, half_d_feature:] = torch.cos(positions * div_term)
113
+ return pe[None].expand(batch_size, -1, -1)
114
+
115
+
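
The positional embedding above fills the first `embed_dim // 2` channels with sines and the rest with cosines of the position scaled by a geometric frequency ladder. A shape-only NumPy sketch of the same arithmetic:

```python
import numpy as np

def sinusoidal_positions(seq_len, embed_dim, max_scale=10000.0, offset=0):
    positions = np.arange(offset, offset + seq_len)[:, None]                 # [seq_len, 1]
    half = embed_dim // 2
    div_term = np.exp(np.arange(half) * -(np.log(max_scale) / (half - 1)))   # [half]
    pe = np.zeros((seq_len, embed_dim))
    pe[:, :half] = np.sin(positions * div_term)  # first half: sin
    pe[:, half:] = np.cos(positions * div_term)  # second half: cos
    return pe

print(sinusoidal_positions(seq_len=4, embed_dim=8).shape)  # (4, 8)
```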
116
+ # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->PegasusX
117
+ class PegasusXAttention(nn.Module):
118
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
119
+
120
+ def __init__(
121
+ self,
122
+ embed_dim: int,
123
+ num_heads: int,
124
+ dropout: float = 0.0,
125
+ is_decoder: bool = False,
126
+ bias: bool = True,
127
+ is_causal: bool = False,
128
+ config: Optional[PegasusXConfig] = None,
129
+ ):
130
+ super().__init__()
131
+ self.embed_dim = embed_dim
132
+ self.num_heads = num_heads
133
+ self.dropout = dropout
134
+ self.head_dim = embed_dim // num_heads
135
+ self.config = config
136
+
137
+ if (self.head_dim * num_heads) != self.embed_dim:
138
+ raise ValueError(
139
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
140
+ f" and `num_heads`: {num_heads})."
141
+ )
142
+ self.scaling = self.head_dim**-0.5
143
+ self.is_decoder = is_decoder
144
+ self.is_causal = is_causal
145
+
146
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
147
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
148
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
149
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
150
+
151
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
152
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
153
+
154
+ def forward(
155
+ self,
156
+ hidden_states: torch.Tensor,
157
+ key_value_states: Optional[torch.Tensor] = None,
158
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
159
+ attention_mask: Optional[torch.Tensor] = None,
160
+ layer_head_mask: Optional[torch.Tensor] = None,
161
+ output_attentions: bool = False,
162
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
163
+ """Input shape: Batch x Time x Channel"""
164
+
165
+ # if key_value_states are provided this layer is used as a cross-attention layer
166
+ # for the decoder
167
+ is_cross_attention = key_value_states is not None
168
+
169
+ bsz, tgt_len, _ = hidden_states.size()
170
+
171
+ # get query proj
172
+ query_states = self.q_proj(hidden_states) * self.scaling
173
+ # get key, value proj
174
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
175
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
176
+ # the provided `key_value_states` to support prefix tuning
177
+ if (
178
+ is_cross_attention
179
+ and past_key_value is not None
180
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
181
+ ):
182
+ # reuse k,v, cross_attentions
183
+ key_states = past_key_value[0]
184
+ value_states = past_key_value[1]
185
+ elif is_cross_attention:
186
+ # cross_attentions
187
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
188
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
189
+ elif past_key_value is not None:
190
+ # reuse k, v, self_attention
191
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
192
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
193
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
194
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
195
+ else:
196
+ # self_attention
197
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
198
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
199
+
200
+ if self.is_decoder:
201
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
202
+ # Further calls to cross_attention layer can then reuse all cross-attention
203
+ # key/value_states (first "if" case)
204
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
205
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
206
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
207
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
208
+ past_key_value = (key_states, value_states)
209
+
210
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
211
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
212
+ key_states = key_states.reshape(*proj_shape)
213
+ value_states = value_states.reshape(*proj_shape)
214
+
215
+ src_len = key_states.size(1)
216
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
217
+
218
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
219
+ raise ValueError(
220
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
221
+ f" {attn_weights.size()}"
222
+ )
223
+
224
+ if attention_mask is not None:
225
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
226
+ raise ValueError(
227
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
228
+ )
229
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
230
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
231
+
232
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
233
+
234
+ if layer_head_mask is not None:
235
+ if layer_head_mask.size() != (self.num_heads,):
236
+ raise ValueError(
237
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
238
+ f" {layer_head_mask.size()}"
239
+ )
240
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
241
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
242
+
243
+ if output_attentions:
244
+ # this operation is a bit awkward, but it's required to
245
+ # make sure that attn_weights keeps its gradient.
246
+ # In order to do so, attn_weights have to be reshaped
247
+ # twice and have to be reused in the following
248
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
249
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
250
+ else:
251
+ attn_weights_reshaped = None
252
+
253
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
254
+
255
+ attn_output = torch.bmm(attn_probs, value_states)
256
+
257
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
258
+ raise ValueError(
259
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
260
+ f" {attn_output.size()}"
261
+ )
262
+
263
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
264
+ attn_output = attn_output.transpose(1, 2)
265
+
266
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
267
+ # partitioned across GPUs when using tensor-parallelism.
268
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
269
+
270
+ attn_output = self.out_proj(attn_output)
271
+
272
+ return attn_output, attn_weights_reshaped, past_key_value
273
+
274
+
275
+ class PegasusXGlobalLocalAttention(nn.Module):
276
+ """Global + Local attention. For use with Encoder only."""
277
+
278
+ def __init__(
279
+ self,
280
+ embed_dim: int,
281
+ num_heads: int,
282
+ block_size: int,
283
+ dropout: float = 0.0,
284
+ is_decoder: bool = False,
285
+ ):
286
+ super().__init__()
287
+ self.embed_dim = embed_dim
288
+ self.num_heads = num_heads
289
+ self.block_size = block_size
290
+ self.dropout = dropout
291
+ self.head_dim = embed_dim // num_heads
292
+
293
+ if (self.head_dim * num_heads) != self.embed_dim:
294
+ raise ValueError(
295
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
296
+ f" and `num_heads`: {num_heads})."
297
+ )
298
+ self.scaling = self.head_dim**-0.5
299
+ self.is_decoder = is_decoder
300
+
301
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=False)
302
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=False)
303
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=False)
304
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=False)
305
+
306
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
307
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
308
+
309
+ def forward(
310
+ self,
311
+ token_hidden_states: torch.Tensor,
312
+ global_hidden_states: torch.Tensor,
313
+ attention_mask: Optional[torch.Tensor] = None,
314
+ output_attentions: bool = False,
315
+ ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
316
+ """Input shape: Batch x Time x Channel"""
317
+ dim = DimensionInfo(
318
+ batch_size=token_hidden_states.shape[0],
319
+ seq_len=token_hidden_states.shape[1],
320
+ block_size=self.block_size,
321
+ num_heads=self.num_heads,
322
+ hidden_dim=token_hidden_states.shape[2],
323
+ dim_per_head=self.head_dim,
324
+ num_blocks=token_hidden_states.shape[1] // self.block_size,
325
+ global_len=global_hidden_states.shape[1],
326
+ padded_seq_len=token_hidden_states.shape[1],
327
+ )
328
+
329
+ # [batch_size, num_heads, padded_seq_len, dim_per_head]
330
+ local_q = self._shape(
331
+ self.q_proj(token_hidden_states) * self.scaling,
332
+ seq_len=dim.padded_seq_len,
333
+ bsz=dim.batch_size,
334
+ )
335
+ local_k = self._shape(
336
+ self.k_proj(token_hidden_states),
337
+ seq_len=dim.padded_seq_len,
338
+ bsz=dim.batch_size,
339
+ )
340
+ local_v = self._shape(
341
+ self.v_proj(token_hidden_states),
342
+ seq_len=dim.padded_seq_len,
343
+ bsz=dim.batch_size,
344
+ )
345
+
346
+ # [batch_size, num_heads, global_len, dim_per_head]
347
+ global_q = self._shape(
348
+ self.q_proj(global_hidden_states) * self.scaling,
349
+ seq_len=dim.global_len,
350
+ bsz=dim.batch_size,
351
+ )
352
+ global_k = self._shape(
353
+ self.k_proj(global_hidden_states),
354
+ seq_len=dim.global_len,
355
+ bsz=dim.batch_size,
356
+ )
357
+ global_v = self._shape(
358
+ self.v_proj(global_hidden_states),
359
+ seq_len=dim.global_len,
360
+ bsz=dim.batch_size,
361
+ )
362
+
363
+ global_attn_output, global_attn_probs = self.compute_global_attention_representations(
364
+ global_q=global_q,
365
+ global_k=global_k,
366
+ global_v=global_v,
367
+ local_k=local_k,
368
+ local_v=local_v,
369
+ mask=attention_mask,
370
+ dim=dim,
371
+ )
372
+ local_attn_output, local_attn_probs = self.compute_local_attention_representations(
373
+ global_k=global_k,
374
+ global_v=global_v,
375
+ local_q=local_q,
376
+ local_k=local_k,
377
+ local_v=local_v,
378
+ mask=attention_mask,
379
+ dim=dim,
380
+ )
381
+
382
+ # [batch_size, global_len, hidden_dim]
383
+ global_attn_output = (
384
+ global_attn_output.transpose(1, 2).contiguous().view(dim.batch_size, dim.global_len, dim.hidden_dim)
385
+ )
386
+ # [batch_size, global_len, hidden_dim]
387
+ global_attn_output = self.out_proj(global_attn_output)
388
+ # [batch_size, num_heads, block_size, num_heads, dim_per_head]
389
+ local_attn_output = local_attn_output.permute(0, 2, 3, 1, 4).contiguous()
390
+ # [batch_size, padded_seq_len, hidden_dim]
391
+ local_attn_output = local_attn_output.view(dim.batch_size, dim.padded_seq_len, dim.hidden_dim)
392
+ # [batch_size, padded_seq_len, hidden_dim]
393
+ local_attn_output = self.out_proj(local_attn_output)
394
+
395
+ if output_attentions:
396
+ attn_probs = {"global": global_attn_probs, "local": local_attn_probs}
397
+ else:
398
+ attn_probs = None
399
+
400
+ return local_attn_output, global_attn_output, attn_probs
401
+
402
+ def compute_global_attention_representations(
403
+ self, global_q, global_k, global_v, local_k, local_v, mask, dim: DimensionInfo
404
+ ):
405
+ """Compute attention representations for global tokens.
406
+
407
+ Global tokens will attend to both global tokens as well as all input sequence tokens. Because the input
408
+ sequence tokens are arranged in blocks for local attention, we unblock them and compute attention.
409
+
410
+ Args:
411
+ global_q (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
412
+ query vectors from global tokens
413
+ global_k (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
414
+ key vectors from global tokens
415
+ global_v (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
416
+ value vectors from global tokens
417
+ local_k (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
418
+ key vectors from local tokens
419
+ local_v (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
420
+ value vectors from local tokens
421
+ mask (`torch.FloatTensor`) of shape [batch_size, padded_seq_len]: attention mask
422
+ dim (DimensionInfo): DimensionInfo wrapper for dimensions
423
+
424
+ Returns:
425
+ output of shape `[batch_size, length, features]`, where length will be padded to a multiple of block_size
426
+ """
427
+ # [batch_size, num_heads, global_len+padded_seq_len, dim_per_head]
428
+ global_and_local_k = torch.cat([global_k, local_k], dim=2)
429
+ # [batch_size, num_heads, global_len+padded_seq_len, dim_per_head]
430
+ global_and_local_v = torch.cat([global_v, local_v], dim=2)
431
+
432
+ # [batch_size, global_len+padded_seq_len]
433
+ extended_mask = nn.functional.pad(mask, pad=(dim.global_len, 0), value=0)
434
+
435
+ # [batch_size, num_heads, global_len, global_len+padded_seq_len]
436
+ attn_weights = torch.einsum("BHGF,BHXF->BHGX", global_q, global_and_local_k)
437
+ attn_weights = attn_weights + extended_mask[:, None, None, :]
438
+ attn_probs = nn.functional.softmax(attn_weights, dim=-1)
439
+ attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training)
440
+
441
+ # [batch_size, num_heads, global_len, F]
442
+ attn_output = torch.einsum("BHGX,BHXF->BHGF", attn_probs, global_and_local_v)
443
+ return attn_output, attn_probs
444
+
445
+ def compute_local_attention_representations(
446
+ self, global_k, global_v, local_q, local_k, local_v, mask, dim: DimensionInfo
447
+ ):
448
+ """Compute attention representations for local tokens.
449
+
450
+ Local tokens will attend to both global tokens as well as all other tokens within the same local block. Hence,
451
+ we need to tile and concatenate the global tokens to every local block
452
+
453
+ Args:
454
+ global_k (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
455
+ key vectors from global tokens
456
+ global_v (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]:
457
+ value vectors from global tokens
458
+ local_q (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
459
+ query vectors from local tokens
460
+ local_k (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
461
+ key vectors from local tokens
462
+ local_v (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]:
463
+ value vectors from local tokens
464
+ mask (`torch.FloatTensor`) of shape [batch_size, padded_seq_len]: attention mask
465
+ dim (DimensionInfo): DimensionInfo wrapper for dimensions
466
+
467
+ Returns:
468
+ output of shape `[batch_size, length, features]`, where length will be padded to a multiple of block_size
469
+ """
470
+ # [batch_size, num_heads, num_blocks, block_size, dim_per_head]
471
+ blocked_local_q = local_q.view(dim.batch_size, dim.num_heads, dim.num_blocks, dim.block_size, dim.dim_per_head)
472
+ # [batch_size, num_heads, num_blocks, block_size, dim_per_head]
473
+ blocked_local_k = local_k.view(dim.batch_size, dim.num_heads, dim.num_blocks, dim.block_size, dim.dim_per_head)
474
+ # [batch_size, num_heads, num_blocks, block_size, dim_per_head]
475
+ blocked_local_v = local_v.view(dim.batch_size, dim.num_heads, dim.num_blocks, dim.block_size, dim.dim_per_head)
476
+
477
+ # [batch_size, num_blocks, global_len+block_size]
478
+ extended_mask = nn.functional.pad(
479
+ mask.view(dim.batch_size, dim.num_blocks, dim.block_size),
480
+ pad=(dim.global_len, 0),
481
+ value=0,
482
+ )
483
+
484
+ # [batch_size, num_heads, num_blocks, block_size, global_len]
485
+ blocked_local2global = torch.einsum("BHNKF,BHGF->BHNKG", blocked_local_q, global_k)
486
+ # [batch_size, num_heads, num_blocks, block_size, block_size]
487
+ blocked_local2local = torch.einsum("BHNKF,BHNXF->BHNKX", blocked_local_q, blocked_local_k)
488
+
489
+ # [batch_size, num_heads, num_blocks, block_size, global_len+block_size]
490
+ attn_weights = torch.cat([blocked_local2global, blocked_local2local], dim=-1)
491
+ attn_weights = attn_weights + extended_mask[:, None, :, None, :]
492
+ attn_probs = nn.functional.softmax(attn_weights, dim=-1)
493
+ attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training)
494
+
495
+ # [batch_size, num_heads, num_blocks, block_size, global_len]
496
+ local2global_attn_probs = attn_probs[:, :, :, :, : dim.global_len]
497
+ # [batch_size, num_heads, num_blocks, block_size, block_size]
498
+ local2local_attn_probs = attn_probs[:, :, :, :, dim.global_len :]
499
+
500
+ # [batch_size, num_heads, num_blocks, block_size, dim_per_head]
501
+ local2global_attn_output = torch.einsum("BHNKG,BHGF->BHNKF", local2global_attn_probs, global_v)
502
+ # [batch_size, num_heads, num_blocks, block_size, dim_per_head]
503
+ local2local_attn_output = torch.einsum("BHNKX,BHNXF->BHNKF", local2local_attn_probs, blocked_local_v)
504
+ # [batch_size, num_heads, num_blocks, block_size, dim_per_head]
505
+ attn_output = local2global_attn_output + local2local_attn_output
506
+ return attn_output, attn_probs
507
+
508
+
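
The local branch of the attention above is ordinary attention done block-by-block: the `[batch, heads, seq, dim]` projections are viewed as `[batch, heads, num_blocks, block_size, dim]` and each block attends to its own keys plus the tiled global keys; the einsum subscripts (`BHNKF,BHNXF->BHNKX` and friends) spell out exactly those shapes. A shape-only sketch of the local-to-local piece, with random tensors and no masking:

```python
import torch

B, H, N, K, F = 2, 4, 3, 8, 16            # batch, heads, num_blocks, block_size, dim_per_head
local_q = torch.randn(B, H, N * K, F)     # [batch, heads, padded_seq_len, dim_per_head]
local_k = torch.randn(B, H, N * K, F)
local_v = torch.randn(B, H, N * K, F)

blocked_q = local_q.view(B, H, N, K, F)
blocked_k = local_k.view(B, H, N, K, F)
blocked_v = local_v.view(B, H, N, K, F)

# each block of K queries attends only to the K keys within the same block
scores = torch.einsum("BHNKF,BHNXF->BHNKX", blocked_q, blocked_k)
probs = scores.softmax(dim=-1)
out = torch.einsum("BHNKX,BHNXF->BHNKF", probs, blocked_v)
print(out.shape)  # torch.Size([2, 4, 3, 8, 16])
```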
509
+ class PegasusXEncoderLayer(nn.Module):
510
+ def __init__(self, stagger_blocks_this_layer: bool, config: PegasusXConfig):
511
+ super().__init__()
512
+ self.embed_dim = config.d_model
513
+ self.self_attn = PegasusXGlobalLocalAttention(
514
+ embed_dim=self.embed_dim,
515
+ num_heads=config.encoder_attention_heads,
516
+ block_size=config.block_size,
517
+ dropout=config.attention_dropout,
518
+ )
519
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
520
+ self.global_self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
521
+ self.dropout = config.dropout
522
+ self.activation_fn = ACT2FN[config.activation_function]
523
+ self.activation_dropout = config.activation_dropout
524
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
525
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
526
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
527
+ self.stagger_blocks_this_layer = stagger_blocks_this_layer
528
+ self.block_size = config.block_size
529
+
530
+ def forward(
531
+ self,
532
+ hidden_states: torch.Tensor,
533
+ global_hidden_states: torch.Tensor,
534
+ attention_mask: torch.Tensor,
535
+ output_attentions: bool = False,
536
+ ) -> torch.Tensor:
537
+ """
538
+ Args:
539
+ hidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
540
+ global_hidden_states (`torch.FloatTensor`): global token hidden states
541
+ *(seq_len, num_global_tokens, embed_dim)*
542
+ attention_mask (`torch.FloatTensor`): attention mask of size
543
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
544
+ output_attentions (`bool`, *optional*):
545
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
546
+ returned tensors for more detail.
547
+ """
548
+ residual = hidden_states
549
+ global_residual = global_hidden_states
550
+
551
+ hidden_states = self.self_attn_layer_norm(hidden_states)
552
+ global_hidden_states = self.global_self_attn_layer_norm(global_hidden_states)
553
+
554
+ if self.stagger_blocks_this_layer:
555
+ # Pad the blocks to simulate staggering
556
+ hidden_states, attention_mask = self.pad_local_tokens(
557
+ hidden_states=hidden_states, attention_mask=attention_mask, block_size=self.block_size
558
+ )
559
+
560
+ hidden_states, global_hidden_states, attn_weights = self.self_attn(
561
+ token_hidden_states=hidden_states,
562
+ global_hidden_states=global_hidden_states,
563
+ attention_mask=attention_mask,
564
+ output_attentions=output_attentions,
565
+ )
566
+
567
+ if self.stagger_blocks_this_layer:
568
+ # Undo the padding
569
+ hidden_states = self.unpad_local_tokens(padded_hidden_states=hidden_states, block_size=self.block_size)
570
+
571
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
572
+ hidden_states = residual + hidden_states
573
+
574
+ global_hidden_states = nn.functional.dropout(global_hidden_states, p=self.dropout, training=self.training)
575
+ global_hidden_states = global_residual + global_hidden_states
576
+
577
+ residual = hidden_states
578
+ hidden_states = self.final_layer_norm(hidden_states)
579
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
580
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
581
+ hidden_states = self.fc2(hidden_states)
582
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
583
+ hidden_states = residual + hidden_states
584
+
585
+ global_residual = global_hidden_states
586
+ global_hidden_states = self.final_layer_norm(global_hidden_states)
587
+ global_hidden_states = self.activation_fn(self.fc1(global_hidden_states))
588
+ global_hidden_states = nn.functional.dropout(
589
+ global_hidden_states, p=self.activation_dropout, training=self.training
590
+ )
591
+ global_hidden_states = self.fc2(global_hidden_states)
592
+ global_hidden_states = nn.functional.dropout(global_hidden_states, p=self.dropout, training=self.training)
593
+ global_hidden_states = global_residual + global_hidden_states
594
+ outputs = (hidden_states, global_hidden_states)
595
+
596
+ if output_attentions:
597
+ outputs += (attn_weights,)
598
+
599
+ return outputs
600
+
601
+ @classmethod
602
+ def pad_local_tokens(cls, hidden_states, attention_mask, block_size):
603
+ # hidden_states: [batch_size, seq_len, hidden_dim]
604
+ pad_size = block_size // 2
605
+ mask_min_value = torch.finfo(hidden_states.dtype).min
606
+ padded_hidden_states = torch.nn.functional.pad(
607
+ hidden_states,
608
+ pad=(0, 0, pad_size, pad_size),
609
+ )
610
+ padded_mask = torch.nn.functional.pad(
611
+ attention_mask,
612
+ pad=(pad_size, pad_size),
613
+ value=mask_min_value,
614
+ )
615
+ return padded_hidden_states, padded_mask
616
+
617
+ @classmethod
618
+ def unpad_local_tokens(cls, padded_hidden_states, block_size):
619
+ # padded_hidden_states: [batch_size, padded seq_len, hidden_dim]
620
+ pad_size = block_size // 2
621
+ return padded_hidden_states[:, pad_size:-pad_size, :]
622
+
623
+
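
`pad_local_tokens` above implements the staggering: it pads half a block of zeros (and mask fill values) on both ends of the sequence so that, in that layer, block boundaries fall in the middle of the previous layer's blocks; `unpad_local_tokens` slices the padding back off. A shape-only round-trip sketch:

```python
import torch
import torch.nn.functional as F

batch, seq_len, hidden, block_size = 2, 32, 8, 16
hidden_states = torch.randn(batch, seq_len, hidden)

pad = block_size // 2
padded = F.pad(hidden_states, pad=(0, 0, pad, pad))        # pad half a block on each side of the seq dim
print(padded.shape)                                        # torch.Size([2, 48, 8])
print(torch.equal(padded[:, pad:-pad, :], hidden_states))  # True
```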
624
+ class PegasusXDecoderLayer(nn.Module):
625
+ def __init__(self, config: PegasusXConfig):
626
+ super().__init__()
627
+ self.embed_dim = config.d_model
628
+
629
+ self.self_attn = PegasusXAttention(
630
+ embed_dim=self.embed_dim,
631
+ num_heads=config.decoder_attention_heads,
632
+ dropout=config.attention_dropout,
633
+ is_decoder=True,
634
+ bias=False,
635
+ )
636
+ self.dropout = config.dropout
637
+ self.activation_fn = ACT2FN[config.activation_function]
638
+ self.activation_dropout = config.activation_dropout
639
+
640
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
641
+ self.encoder_attn = PegasusXAttention(
642
+ self.embed_dim,
643
+ config.decoder_attention_heads,
644
+ dropout=config.attention_dropout,
645
+ is_decoder=True,
646
+ bias=False,
647
+ )
648
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
649
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
650
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
651
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
652
+
653
+ def forward(
654
+ self,
655
+ hidden_states: torch.Tensor,
656
+ attention_mask: Optional[torch.Tensor] = None,
657
+ encoder_hidden_states: Optional[torch.Tensor] = None,
658
+ encoder_attention_mask: Optional[torch.Tensor] = None,
659
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
660
+ output_attentions: Optional[bool] = False,
661
+ use_cache: Optional[bool] = True,
662
+ ) -> torch.Tensor:
663
+ """
664
+ Args:
665
+ hidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
666
+ attention_mask (`torch.FloatTensor`): attention mask of size
667
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
668
+ encoder_hidden_states (`torch.FloatTensor`):
669
+ cross attention input to the layer of shape *(seq_len, batch, embed_dim)*
670
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
671
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
672
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
673
+ output_attentions (`bool`, *optional*):
674
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
675
+ returned tensors for more detail.
676
+ use_cache: Whether to use the KV cache for decoding
677
+ """
678
+ residual = hidden_states
679
+ hidden_states = self.self_attn_layer_norm(hidden_states)
680
+
681
+ # Self Attention
682
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
683
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
684
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
685
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
686
+ hidden_states=hidden_states,
687
+ past_key_value=self_attn_past_key_value,
688
+ attention_mask=attention_mask,
689
+ output_attentions=output_attentions,
690
+ )
691
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
692
+ hidden_states = residual + hidden_states
693
+
694
+ # Cross-Attention Block
695
+ cross_attn_present_key_value = None
696
+ cross_attn_weights = None
697
+ if encoder_hidden_states is not None:
698
+ residual = hidden_states
699
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
700
+
701
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
702
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
703
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
704
+ hidden_states=hidden_states,
705
+ key_value_states=encoder_hidden_states,
706
+ attention_mask=encoder_attention_mask,
707
+ past_key_value=cross_attn_past_key_value,
708
+ output_attentions=output_attentions,
709
+ )
710
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
711
+ hidden_states = residual + hidden_states
712
+
713
+ # add cross-attn to positions 3,4 of present_key_value tuple
714
+ present_key_value = present_key_value + cross_attn_present_key_value
715
+
716
+ # Fully Connected
717
+ residual = hidden_states
718
+ hidden_states = self.final_layer_norm(hidden_states)
719
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
720
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
721
+ hidden_states = self.fc2(hidden_states)
722
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
723
+ hidden_states = residual + hidden_states
724
+
725
+ outputs = (hidden_states,)
726
+
727
+ if output_attentions:
728
+ outputs += (self_attn_weights, cross_attn_weights)
729
+
730
+ if use_cache:
731
+ outputs += (present_key_value,)
732
+
733
+ return outputs
734
+
735
+
736
+ class PegasusXPreTrainedModel(PreTrainedModel):
737
+ config_class = PegasusXConfig
738
+ base_model_prefix = "model"
739
+ supports_gradient_checkpointing = True
740
+ _no_split_modules = [r"PegasusXEncoderLayer", r"PegasusXDecoderLayer"]
741
+
742
+ def _init_weights(self, module):
743
+ std = self.config.init_std
744
+ if isinstance(module, nn.Linear):
745
+ module.weight.data.normal_(mean=0.0, std=std)
746
+ if module.bias is not None:
747
+ module.bias.data.zero_()
748
+ elif isinstance(module, nn.Embedding):
749
+ module.weight.data.normal_(mean=0.0, std=std)
750
+
751
+
752
+ PEGASUS_X_START_DOCSTRING = r"""
753
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
754
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
755
+ etc.)
756
+
757
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
758
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
759
+ and behavior.
760
+
761
+ Parameters:
762
+ config ([`PegasusXConfig`]):
763
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
764
+ load the weights associated with the model, only the configuration. Check out the
765
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
766
+ """
767
+
768
+ PEGASUS_X_GENERATION_EXAMPLE = r"""
769
+ Summarization example:
770
+
771
+ ```python
772
+ >>> from transformers import AutoTokenizer, PegasusXForConditionalGeneration
773
+
774
+ >>> model = PegasusXForConditionalGeneration.from_pretrained("google/pegasus-x-base")
775
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-x-large")
776
+
777
+ >>> ARTICLE_TO_SUMMARIZE = (
778
+ ... "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
779
+ ... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
780
+ ... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
781
+ ... )
782
+ >>> inputs = tokenizer(ARTICLE_TO_SUMMARIZE, max_length=1024, return_tensors="pt")
783
+
784
+ >>> # Generate Summary
785
+ >>> summary_ids = model.generate(inputs["input_ids"])
786
+ >>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
787
+ "California's largest electricity provider has turned off power to hundreds of thousands of customers."
788
+ ```
789
+ """
790
+
791
+ PEGASUS_X_INPUTS_DOCSTRING = r"""
792
+ Args:
793
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
794
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
795
+ it.
796
+
797
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
798
+ [`PreTrainedTokenizer.__call__`] for details.
799
+
800
+ [What are input IDs?](../glossary#input-ids)
801
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
802
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
803
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
804
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
805
+
806
+ - 1 for tokens that are **not masked**,
807
+ - 0 for tokens that are **masked**.
808
+
809
+ [What are attention masks?](../glossary#attention-mask)
810
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
811
+ Indices of decoder input sequence tokens in the vocabulary.
812
+
813
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
814
+ [`PreTrainedTokenizer.__call__`] for details.
815
+
816
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
817
+
818
+ PEGASUS-X uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
819
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
820
+ `past_key_values`).
821
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
822
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
823
+ be used by default.
824
+
825
+ encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
826
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
827
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
828
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
829
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
830
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
831
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
832
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
833
+
834
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
835
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
836
+
837
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
838
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
839
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
840
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
841
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
842
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
843
+ than the model's internal embedding lookup matrix.
844
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
845
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
846
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
847
+ input (see `past_key_values`). This is useful if you want more control over how to convert
848
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
849
+
850
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
851
+ of `inputs_embeds`.
852
+ use_cache (`bool`, *optional*):
853
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
854
+ `past_key_values`).
855
+ output_attentions (`bool`, *optional*):
856
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
857
+ tensors for more detail.
858
+ output_hidden_states (`bool`, *optional*):
859
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
860
+ more detail.
861
+ return_dict (`bool`, *optional*):
862
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
863
+ """
864
+
865
+
866
+ class PegasusXEncoder(PegasusXPreTrainedModel):
867
+ """
868
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
869
+ [`PegasusXEncoderLayer`].
870
+
871
+ Args:
872
+ config: PegasusXConfig
873
+ embed_tokens (nn.Embedding): output embedding
874
+ """
875
+
876
+ def __init__(self, config: PegasusXConfig, embed_tokens: Optional[nn.Embedding] = None):
877
+ super().__init__(config)
878
+
879
+ self.dropout = config.dropout
880
+ self.layerdrop = config.encoder_layerdrop
881
+
882
+ embed_dim = config.d_model
883
+ self.max_source_positions = config.max_position_embeddings
884
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
885
+
886
+ if embed_tokens is not None:
887
+ self.embed_tokens = embed_tokens
888
+ else:
889
+ self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim)
890
+
891
+ self.embed_global = nn.Embedding(config.num_global_tokens, embed_dim)
892
+ self.embed_positions = PegasusXSinusoidalPositionalEmbedding(embed_dim)
893
+ self.layers = nn.ModuleList(
894
+ [
895
+ PegasusXEncoderLayer(
896
+ stagger_blocks_this_layer=i % 2 == 1 and config.stagger_local_blocks, config=config
897
+ )
898
+ for i in range(config.encoder_layers)
899
+ ]
900
+ )
901
+ self.layer_norm = nn.LayerNorm(config.d_model)
902
+
903
+ self.gradient_checkpointing = False
904
+ # Initialize weights and apply final processing
905
+ self.post_init()
906
+
907
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
908
+ """
909
+ Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
910
+ config.max_position_embeddings`.
911
+
912
+ Arguments:
913
+ new_num_position_embeddings (`int`):
914
+ The number of new position embeddings. If position embeddings are learned, increasing the size will add
915
+ newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
916
+ position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
917
+ add correct vectors at the end following the position encoding algorithm, whereas reducing the size
918
+ will remove vectors from the end.
919
+ """
920
+ logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
921
+ self.config.max_position_embeddings = new_num_position_embeddings
922
+
923
+ self.embed_positions = PegasusXSinusoidalPositionalEmbedding(self.config.d_model)
924
+ self.embed_positions.to(self.device)
925
+
926
+ def get_position_embeddings(self) -> nn.Embedding:
927
+ """
928
+ Returns the position embeddings matrix
929
+ """
930
+ return self.embed_positions
931
+
932
+ def forward(
933
+ self,
934
+ input_ids=None,
935
+ attention_mask=None,
936
+ inputs_embeds=None,
937
+ output_attentions=None,
938
+ output_hidden_states=None,
939
+ return_dict=None,
940
+ ):
941
+ r"""
942
+ Args:
943
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
944
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
945
+ provide it.
946
+
947
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
948
+ [`PreTrainedTokenizer.__call__`] for details.
949
+
950
+ [What are input IDs?](../glossary#input-ids)
951
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
952
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
953
+
954
+ - 1 for tokens that are **not masked**,
955
+ - 0 for tokens that are **masked**.
956
+
957
+ [What are attention masks?](../glossary#attention-mask)
958
+
959
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
960
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
961
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
962
+ than the model's internal embedding lookup matrix.
963
+ output_attentions (`bool`, *optional*):
964
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
965
+ returned tensors for more detail.
966
+ output_hidden_states (`bool`, *optional*):
967
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
968
+ for more detail.
969
+ return_dict (`bool`, *optional*):
970
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
971
+ """
972
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
973
+ output_hidden_states = (
974
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
975
+ )
976
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
977
+
978
+ # retrieve input_ids and inputs_embeds
979
+ if input_ids is not None and inputs_embeds is not None:
980
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
981
+ elif input_ids is not None:
982
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
983
+ input_shape = input_ids.size()
984
+ input_ids = input_ids.view(-1, input_shape[-1])
985
+ elif inputs_embeds is not None:
986
+ input_shape = inputs_embeds.size()[:-1]
987
+ else:
988
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
989
+
990
+ if inputs_embeds is None:
991
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
992
+
993
+ embed_pos = self.embed_positions(inputs_embeds)
994
+
995
+ hidden_states = inputs_embeds + embed_pos
996
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
997
+
998
+ batch_size, seq_len, _ = hidden_states.shape
999
+
1000
+ # Setup mask
1001
+ if attention_mask is None:
1002
+ attention_mask = torch.ones(*input_shape, dtype=inputs_embeds.dtype, device=inputs_embeds.device)
1003
+ attention_mask = attention_mask.to(dtype=hidden_states.dtype)
1004
+ mask_min_value = torch.finfo(hidden_states.dtype).min
1005
+ inverted_mask = 1.0 - attention_mask
1006
+ attention_mask = inverted_mask.masked_fill(
1007
+ inverted_mask.to(torch.bool),
1008
+ mask_min_value,
1009
+ )
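+ # The 1/0 padding mask has now been converted into an additive mask: kept positions become 0.0 and masked
+ # positions become the smallest finite value of the hidden-state dtype, so they are effectively ignored
+ # after the attention softmax.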
1010
+
1011
+ # padding to block_size
1012
+ if seq_len % self.config.block_size != 0:
1013
+ pad_len = self.config.block_size - seq_len % self.config.block_size
1014
+ hidden_states = nn.functional.pad(hidden_states, pad=(0, 0, 0, pad_len), value=0)
1015
+ attention_mask = nn.functional.pad(attention_mask, pad=(0, pad_len), value=mask_min_value)
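+ # Illustrative example (values assumed, not taken from the config): with block_size=512 and seq_len=1000,
+ # pad_len = 512 - (1000 % 512) = 24, so the sequence is padded to length 1024 and the padded positions
+ # receive the minimum mask value so attention ignores them.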
1016
+
1017
+ # Global tokens
1018
+ global_hidden_states = self.embed_global(
1019
+ torch.arange(self.config.num_global_tokens, device=hidden_states.device)[None].expand(batch_size, -1)
1020
+ )
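+ # This looks up one learned embedding per global token and broadcasts it over the batch, giving
+ # `global_hidden_states` a shape of (batch_size, config.num_global_tokens, config.d_model); the encoder
+ # layers below update these global states alongside the local token states.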
1021
+
1022
+ encoder_states = () if output_hidden_states else None
1023
+ all_attentions = () if output_attentions else None
1024
+
1025
+ for idx, encoder_layer in enumerate(self.layers):
1026
+ if output_hidden_states:
1027
+ encoder_states = encoder_states + (hidden_states,)
1028
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1029
+ to_drop = False
1030
+ if self.training:
1031
+ dropout_probability = torch.rand([])
1032
+ if dropout_probability < self.layerdrop: # skip the layer
1033
+ to_drop = True
1034
+
1035
+ if to_drop:
1036
+ layer_outputs = (None, None)
1037
+ else:
1038
+ if self.gradient_checkpointing and self.training:
1039
+ layer_outputs = self._gradient_checkpointing_func(
1040
+ encoder_layer.__call__,
1041
+ hidden_states,
1042
+ global_hidden_states,
1043
+ attention_mask,
1044
+ output_attentions,
1045
+ )
1046
+ else:
1047
+ layer_outputs = encoder_layer(
1048
+ hidden_states,
1049
+ global_hidden_states,
1050
+ attention_mask,
1051
+ output_attentions=output_attentions,
1052
+ )
1053
+
1054
+ hidden_states = layer_outputs[0]
1055
+ global_hidden_states = layer_outputs[1]
1056
+
1057
+ if output_attentions:
1058
+ all_attentions = all_attentions + (layer_outputs[2],)
1059
+
1060
+ # Undo padding-to-block-size
1061
+ hidden_states = hidden_states[:, :seq_len]
1062
+
1063
+ hidden_states = self.layer_norm(hidden_states)
1064
+
1065
+ if output_hidden_states:
1066
+ encoder_states = encoder_states + ((hidden_states, global_hidden_states),)
1067
+
1068
+ if not return_dict:
1069
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
1070
+ return BaseModelOutput(
1071
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
1072
+ )
1073
+
1074
+
1075
+ class PegasusXDecoder(PegasusXPreTrainedModel):
1076
+ """
1077
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PegasusXDecoderLayer`]
1078
+
1079
+ Args:
1080
+ config: PegasusXConfig
1081
+ embed_tokens (nn.Embedding): output embedding
1082
+ """
1083
+
1084
+ def __init__(self, config: PegasusXConfig, embed_tokens: Optional[nn.Embedding] = None):
1085
+ super().__init__(config)
1086
+ self.dropout = config.dropout
1087
+ self.layerdrop = config.decoder_layerdrop
1088
+ self.max_target_positions = config.max_position_embeddings
1089
+ self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
1090
+
1091
+ if embed_tokens is not None:
1092
+ self.embed_tokens = embed_tokens
1093
+ else:
1094
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model)
1095
+
1096
+ self.embed_positions = PegasusXSinusoidalPositionalEmbedding(config.d_model)
1097
+ self.layers = nn.ModuleList([PegasusXDecoderLayer(config) for _ in range(config.decoder_layers)])
1098
+ self.layer_norm = nn.LayerNorm(config.d_model)
1099
+
1100
+ self.gradient_checkpointing = False
1101
+ # Initialize weights and apply final processing
1102
+ self.post_init()
1103
+
1104
+ def get_input_embeddings(self):
1105
+ return self.embed_tokens
1106
+
1107
+ def set_input_embeddings(self, value):
1108
+ self.embed_tokens = value
1109
+
1110
+ def forward(
1111
+ self,
1112
+ input_ids=None,
1113
+ attention_mask=None,
1114
+ encoder_hidden_states=None,
1115
+ encoder_attention_mask=None,
1116
+ past_key_values=None,
1117
+ inputs_embeds=None,
1118
+ use_cache=None,
1119
+ output_attentions=None,
1120
+ output_hidden_states=None,
1121
+ return_dict=None,
1122
+ ):
1123
+ r"""
1124
+ Args:
1125
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1126
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
1127
+ provide it.
1128
+
1129
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1130
+ [`PreTrainedTokenizer.__call__`] for details.
1131
+
1132
+ [What are input IDs?](../glossary#input-ids)
1133
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1134
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1135
+
1136
+ - 1 for tokens that are **not masked**,
1137
+ - 0 for tokens that are **masked**.
1138
+
1139
+ [What are attention masks?](../glossary#attention-mask)
1140
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
1141
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
1142
+ of the decoder.
1143
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
1144
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
1145
+ selected in `[0, 1]`:
1146
+
1147
+ - 1 for tokens that are **not masked**,
1148
+ - 0 for tokens that are **masked**.
1149
+
1150
+ [What are attention masks?](../glossary#attention-mask)
1151
+
1152
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
1153
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1154
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
1155
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
1156
+
1157
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
1158
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
1159
+
1160
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
1161
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
1162
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1163
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1164
+ Optionally, instead of passing
1165
+ `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more
1166
+ control over how to convert `input_ids` indices into associated vectors than the model's internal
1167
+ embedding lookup matrix.
1168
+ output_attentions (`bool`, *optional*):
1169
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1170
+ returned tensors for more detail.
1171
+ output_hidden_states (`bool`, *optional*):
1172
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
1173
+ for more detail.
1174
+ return_dict (`bool`, *optional*):
1175
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1176
+ """
1177
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1178
+ output_hidden_states = (
1179
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1180
+ )
1181
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1182
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1183
+
1184
+ # retrieve input_ids and inputs_embeds
1185
+ if input_ids is not None and inputs_embeds is not None:
1186
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
1187
+ elif input_ids is not None:
1188
+ input_shape = input_ids.size()
1189
+ input_ids = input_ids.view(-1, input_shape[-1])
1190
+ elif inputs_embeds is not None:
1191
+ input_shape = inputs_embeds.size()[:-1]
1192
+ else:
1193
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
1194
+
1195
+ # past_key_values_length
1196
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
1197
+
1198
+ if inputs_embeds is None:
1199
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
1200
+
1201
+ attention_mask = _prepare_4d_causal_attention_mask(
1202
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
1203
+ )
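+ # This builds the additive 4D causal mask of shape (batch_size, 1, tgt_len, past_key_values_length + tgt_len),
+ # combining the user-provided padding mask (if any) with the lower-triangular look-ahead mask.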
1204
+
1205
+ # expand encoder attention mask
1206
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
1207
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1208
+ encoder_attention_mask = _prepare_4d_attention_mask(
1209
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
1210
+ )
1211
+
1212
+ # embed positions
1213
+ positions = self.embed_positions(inputs_embeds, past_key_values_length)
1214
+
1215
+ positions = positions.to(inputs_embeds.device)
1216
+
1217
+ hidden_states = inputs_embeds + positions
1218
+
1219
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
1220
+
1221
+ if self.gradient_checkpointing and self.training:
1222
+ if use_cache:
1223
+ logger.warning_once(
1224
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1225
+ )
1226
+ use_cache = False
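+ # Gradient checkpointing recomputes each layer's forward pass during the backward pass, which does not
+ # compose with returning cached key/value states, so caching is disabled in that case.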
1227
+
1228
+ # decoder layers
1229
+ all_hidden_states = () if output_hidden_states else None
1230
+ all_self_attns = () if output_attentions else None
1231
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
1232
+ next_decoder_cache = () if use_cache else None
1233
+
1234
+ for idx, decoder_layer in enumerate(self.layers):
1235
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
1236
+ if output_hidden_states:
1237
+ all_hidden_states += (hidden_states,)
1238
+ if self.training:
1239
+ dropout_probability = torch.rand([])
1240
+ if dropout_probability < self.layerdrop:
1241
+ continue
1242
+
1243
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
1244
+
1245
+ if self.gradient_checkpointing and self.training:
1246
+ layer_outputs = self._gradient_checkpointing_func(
1247
+ decoder_layer.__call__,
1248
+ hidden_states,
1249
+ attention_mask,
1250
+ encoder_hidden_states,
1251
+ encoder_attention_mask,
1252
+ None,
1253
+ output_attentions,
1254
+ use_cache,
1255
+ )
1256
+ else:
1257
+ layer_outputs = decoder_layer(
1258
+ hidden_states,
1259
+ attention_mask=attention_mask,
1260
+ encoder_hidden_states=encoder_hidden_states,
1261
+ encoder_attention_mask=encoder_attention_mask,
1262
+ past_key_value=past_key_value,
1263
+ output_attentions=output_attentions,
1264
+ use_cache=use_cache,
1265
+ )
1266
+ hidden_states = layer_outputs[0]
1267
+
1268
+ if use_cache:
1269
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
1270
+
1271
+ if output_attentions:
1272
+ all_self_attns += (layer_outputs[1],)
1273
+
1274
+ if encoder_hidden_states is not None:
1275
+ all_cross_attentions += (layer_outputs[2],)
1276
+
1277
+ hidden_states = self.layer_norm(hidden_states)
1278
+
1279
+ # add hidden states from the last decoder layer
1280
+ if output_hidden_states:
1281
+ all_hidden_states += (hidden_states,)
1282
+
1283
+ next_cache = next_decoder_cache if use_cache else None
1284
+ if not return_dict:
1285
+ return tuple(
1286
+ v
1287
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
1288
+ if v is not None
1289
+ )
1290
+ return BaseModelOutputWithPastAndCrossAttentions(
1291
+ last_hidden_state=hidden_states,
1292
+ past_key_values=next_cache,
1293
+ hidden_states=all_hidden_states,
1294
+ attentions=all_self_attns,
1295
+ cross_attentions=all_cross_attentions,
1296
+ )
1297
+
1298
+
1299
+ @add_start_docstrings(
1300
+ "The bare PEGASUS-X Model outputting raw hidden-states without any specific head on top.",
1301
+ PEGASUS_X_START_DOCSTRING,
1302
+ )
1303
+ class PegasusXModel(PegasusXPreTrainedModel):
1304
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
1305
+
1306
+ def __init__(self, config: PegasusXConfig):
1307
+ super().__init__(config)
1308
+
1309
+ vocab_size = config.vocab_size
1310
+ self.shared = nn.Embedding(vocab_size, config.d_model)
1311
+
1312
+ self.encoder = PegasusXEncoder(config, self.shared)
1313
+ self.decoder = PegasusXDecoder(config, self.shared)
1314
+
1315
+ # Initialize weights and apply final processing
1316
+ self.post_init()
1317
+
1318
+ def get_input_embeddings(self):
1319
+ return self.shared
1320
+
1321
+ def set_input_embeddings(self, value):
1322
+ self.shared = value
1323
+ self.encoder.embed_tokens = self.shared
1324
+ self.decoder.embed_tokens = self.shared
1325
+
1326
+ def get_encoder(self):
1327
+ return self.encoder
1328
+
1329
+ def get_decoder(self):
1330
+ return self.decoder
1331
+
1332
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
1333
+ """
1334
+ Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
1335
+ config.max_position_embeddings`.
1336
+
1337
+ Arguments:
1338
+ new_num_position_embeddings (`int`):
1339
+ The number of new position embeddings. If position embeddings are learned, increasing the size will add
1340
+ newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
1341
+ position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
1342
+ add correct vectors at the end following the position encoding algorithm, whereas reducing the size
1343
+ will remove vectors from the end.
1344
+ """
1345
+ self.config.max_position_embeddings = new_num_position_embeddings
1346
+ self.encoder.resize_position_embeddings(new_num_position_embeddings)
1347
+ self.decoder.resize_position_embeddings(new_num_position_embeddings)
1348
+
1349
+ def get_position_embeddings(self) -> Tuple[nn.Embedding]:
1350
+ """
1351
+ Returns the position embeddings matrix
1352
+ """
1353
+ return (self.encoder.get_position_embeddings(), self.decoder.get_position_embeddings())
1354
+
1355
+ @add_start_docstrings_to_model_forward(PEGASUS_X_INPUTS_DOCSTRING)
1356
+ @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
1357
+ def forward(
1358
+ self,
1359
+ input_ids: Optional[torch.Tensor] = None,
1360
+ attention_mask: Optional[torch.Tensor] = None,
1361
+ decoder_input_ids: Optional[torch.Tensor] = None,
1362
+ decoder_attention_mask: Optional[torch.Tensor] = None,
1363
+ encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
1364
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
1365
+ inputs_embeds: Optional[torch.Tensor] = None,
1366
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
1367
+ use_cache: Optional[bool] = None,
1368
+ output_attentions: Optional[bool] = None,
1369
+ output_hidden_states: Optional[bool] = None,
1370
+ return_dict: Optional[bool] = None,
1371
+ ) -> Union[Tuple, Seq2SeqModelOutput]:
1372
+ r"""
1373
+ Returns:
1374
+
1375
+ Example:
1376
+
1377
+ ```python
1378
+ >>> from transformers import AutoTokenizer, PegasusXModel
1379
+
1380
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-x-large")
1381
+ >>> model = PegasusXModel.from_pretrained("google/pegasus-x-large")
1382
+
1383
+ >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
1384
+ >>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt")
1385
+ >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)
1386
+
1387
+ >>> last_hidden_states = outputs.last_hidden_state
1388
+ >>> list(last_hidden_states.shape)
1389
+ [1, 4, 1024]
1390
+ ```"""
1391
+
1392
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1393
+ output_hidden_states = (
1394
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1395
+ )
1396
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1397
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1398
+
1399
+ if encoder_outputs is None:
1400
+ encoder_outputs = self.encoder(
1401
+ input_ids=input_ids,
1402
+ attention_mask=attention_mask,
1403
+ inputs_embeds=inputs_embeds,
1404
+ output_attentions=output_attentions,
1405
+ output_hidden_states=output_hidden_states,
1406
+ return_dict=return_dict,
1407
+ )
1408
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
1409
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
1410
+ encoder_outputs = BaseModelOutput(
1411
+ last_hidden_state=encoder_outputs[0],
1412
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1413
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1414
+ )
1415
+
1416
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
1417
+ decoder_outputs = self.decoder(
1418
+ input_ids=decoder_input_ids,
1419
+ attention_mask=decoder_attention_mask,
1420
+ encoder_hidden_states=encoder_outputs[0],
1421
+ encoder_attention_mask=attention_mask,
1422
+ past_key_values=past_key_values,
1423
+ inputs_embeds=decoder_inputs_embeds,
1424
+ use_cache=use_cache,
1425
+ output_attentions=output_attentions,
1426
+ output_hidden_states=output_hidden_states,
1427
+ return_dict=return_dict,
1428
+ )
1429
+
1430
+ if not return_dict:
1431
+ return decoder_outputs + encoder_outputs
1432
+
1433
+ return Seq2SeqModelOutput(
1434
+ last_hidden_state=decoder_outputs.last_hidden_state,
1435
+ past_key_values=decoder_outputs.past_key_values,
1436
+ decoder_hidden_states=decoder_outputs.hidden_states,
1437
+ decoder_attentions=decoder_outputs.attentions,
1438
+ cross_attentions=decoder_outputs.cross_attentions,
1439
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1440
+ encoder_hidden_states=encoder_outputs.hidden_states,
1441
+ encoder_attentions=encoder_outputs.attentions,
1442
+ )
1443
+
1444
+
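+ # A minimal usage sketch for the conditional-generation head defined below (illustrative only; the
+ # checkpoint name and generation settings are examples, not requirements):
+ #
+ #     from transformers import AutoTokenizer, PegasusXForConditionalGeneration
+ #
+ #     tokenizer = AutoTokenizer.from_pretrained("google/pegasus-x-base")
+ #     model = PegasusXForConditionalGeneration.from_pretrained("google/pegasus-x-base")
+ #     batch = tokenizer("A very long document ...", return_tensors="pt")
+ #     summary_ids = model.generate(**batch, max_new_tokens=64)
+ #     print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))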
1445
+ @add_start_docstrings("The PEGASUS-X for conditional generation (e.g. summarization).", PEGASUS_X_START_DOCSTRING)
1446
+ class PegasusXForConditionalGeneration(PegasusXPreTrainedModel):
1447
+ base_model_prefix = "model"
1448
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]
1449
+
1450
+ def __init__(self, config: PegasusXConfig):
1451
+ super().__init__(config)
1452
+ self.model = PegasusXModel(config)
1453
+ self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
1454
+
1455
+ # Initialize weights and apply final processing
1456
+ self.post_init()
1457
+
1458
+ def get_encoder(self):
1459
+ return self.model.get_encoder()
1460
+
1461
+ def get_decoder(self):
1462
+ return self.model.get_decoder()
1463
+
1464
+ def get_output_embeddings(self):
1465
+ return self.lm_head
1466
+
1467
+ def set_output_embeddings(self, new_embeddings):
1468
+ self.lm_head = new_embeddings
1469
+
1470
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
1471
+ """
1472
+ Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
1473
+ config.max_position_embeddings`.
1474
+
1475
+ Arguments:
1476
+ new_num_position_embeddings (`int`):
1477
+ The number of new position embeddings. If position embeddings are learned, increasing the size will add
1478
+ newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
1479
+ position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
1480
+ add correct vectors at the end following the position encoding algorithm, whereas reducing the size
1481
+ will remove vectors from the end.
1482
+ """
1483
+ self.config.max_position_embeddings = new_num_position_embeddings
1484
+ self.model.encoder.resize_position_embeddings(new_num_position_embeddings)
1485
+ self.model.decoder.resize_position_embeddings(new_num_position_embeddings)
1486
+
1487
+ def get_position_embeddings(self) -> Tuple[nn.Embedding]:
1488
+ """
1489
+ Returns the position embeddings matrix
1490
+ """
1491
+ return (self.model.encoder.get_position_embeddings(), self.model.decoder.get_position_embeddings())
1492
+
1493
+ @add_start_docstrings_to_model_forward(PEGASUS_X_INPUTS_DOCSTRING)
1494
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
1495
+ @add_end_docstrings(PEGASUS_X_GENERATION_EXAMPLE)
1496
+ def forward(
1497
+ self,
1498
+ input_ids: Optional[torch.Tensor] = None,
1499
+ attention_mask: Optional[torch.Tensor] = None,
1500
+ decoder_input_ids: Optional[torch.Tensor] = None,
1501
+ decoder_attention_mask: Optional[torch.Tensor] = None,
1502
+ encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
1503
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
1504
+ inputs_embeds: Optional[torch.Tensor] = None,
1505
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
1506
+ labels: Optional[torch.Tensor] = None,
1507
+ use_cache: Optional[bool] = None,
1508
+ output_attentions: Optional[bool] = None,
1509
+ output_hidden_states: Optional[bool] = None,
1510
+ return_dict: Optional[bool] = None,
1511
+ ) -> Union[Tuple, Seq2SeqLMOutput]:
1512
+ r"""
1513
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1514
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1515
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1516
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1517
+
1518
+ Returns:
1519
+
1520
+ """
1521
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1522
+
1523
+ if labels is not None:
1524
+ if use_cache:
1525
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
1526
+ use_cache = False
1527
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1528
+ decoder_input_ids = shift_tokens_right(
1529
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
1530
+ )
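+ # When only `labels` are given, the decoder inputs are created by shifting the labels one position to the
+ # right and prepending `decoder_start_token_id` (standard seq2seq teacher forcing, as in BART/PEGASUS).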
1531
+
1532
+ outputs = self.model(
1533
+ input_ids,
1534
+ attention_mask=attention_mask,
1535
+ decoder_input_ids=decoder_input_ids,
1536
+ encoder_outputs=encoder_outputs,
1537
+ decoder_attention_mask=decoder_attention_mask,
1538
+ past_key_values=past_key_values,
1539
+ inputs_embeds=inputs_embeds,
1540
+ decoder_inputs_embeds=decoder_inputs_embeds,
1541
+ use_cache=use_cache,
1542
+ output_attentions=output_attentions,
1543
+ output_hidden_states=output_hidden_states,
1544
+ return_dict=return_dict,
1545
+ )
1546
+ lm_logits = self.lm_head(outputs[0])
1547
+
1548
+ masked_lm_loss = None
1549
+ if labels is not None:
1550
+ loss_fct = CrossEntropyLoss()
1551
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
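+ # PyTorch's `CrossEntropyLoss` ignores targets equal to -100 by default (`ignore_index=-100`), which is
+ # how the masked label positions mentioned in the docstring are excluded from the loss.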
1552
+
1553
+ if not return_dict:
1554
+ output = (lm_logits,) + outputs[1:]
1555
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1556
+
1557
+ return Seq2SeqLMOutput(
1558
+ loss=masked_lm_loss,
1559
+ logits=lm_logits,
1560
+ past_key_values=outputs.past_key_values,
1561
+ decoder_hidden_states=outputs.decoder_hidden_states,
1562
+ decoder_attentions=outputs.decoder_attentions,
1563
+ cross_attentions=outputs.cross_attentions,
1564
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1565
+ encoder_hidden_states=outputs.encoder_hidden_states,
1566
+ encoder_attentions=outputs.encoder_attentions,
1567
+ )
1568
+
1569
+ def prepare_inputs_for_generation(
1570
+ self,
1571
+ decoder_input_ids,
1572
+ past_key_values=None,
1573
+ attention_mask=None,
1574
+ use_cache=None,
1575
+ encoder_outputs=None,
1576
+ **kwargs,
1577
+ ):
1578
+ # cut decoder_input_ids if past is used
1579
+ if past_key_values is not None:
1580
+ past_length = past_key_values[0][0].shape[2]
1581
+
1582
+ # Some generation methods already pass only the last input ID
1583
+ if decoder_input_ids.shape[1] > past_length:
1584
+ remove_prefix_length = past_length
1585
+ else:
1586
+ # Default to old behavior: keep only final ID
1587
+ remove_prefix_length = decoder_input_ids.shape[1] - 1
1588
+
1589
+ decoder_input_ids = decoder_input_ids[:, remove_prefix_length:]
1590
+
1591
+ return {
1592
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
1593
+ "encoder_outputs": encoder_outputs,
1594
+ "past_key_values": past_key_values,
1595
+ "decoder_input_ids": decoder_input_ids,
1596
+ "attention_mask": attention_mask,
1597
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
1598
+ }
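+ # Illustration of the trimming above: if 7 positions are already cached (`past_length == 7`) and
+ # `decoder_input_ids` has length 8, only the final, not-yet-cached token is kept, so the decoder advances
+ # one step while reusing the cached key/value states for the first 7 positions.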
1599
+
1600
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
1601
+ return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
1602
+
1603
+ @staticmethod
1604
+ def _reorder_cache(past_key_values, beam_idx):
1605
+ reordered_past = ()
1606
+ for layer_past in past_key_values:
1607
+ # cached cross_attention states don't have to be reordered -> they are always the same
1608
+ reordered_past += (
1609
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
1610
+ + layer_past[2:],
1611
+ )
1612
+ return reordered_past
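+ # During beam search, `beam_idx` maps each beam slot to the beam it was expanded from; only the
+ # self-attention key/value tensors (`layer_past[:2]`) depend on that choice, so the cached cross-attention
+ # states (`layer_past[2:]`) are carried over unchanged.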
1613
+
1614
+
1615
+ # Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->PegasusX
1616
+ class PegasusXDecoderWrapper(PegasusXPreTrainedModel):
1617
+ """
1618
+ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
1619
+ used in combination with the [`EncoderDecoderModel`] framework.
1620
+ """
1621
+
1622
+ def __init__(self, config):
1623
+ super().__init__(config)
1624
+ self.decoder = PegasusXDecoder(config)
1625
+
1626
+ def forward(self, *args, **kwargs):
1627
+ return self.decoder(*args, **kwargs)
llmeval-env/lib/python3.10/site-packages/transformers/models/udop/__init__.py ADDED
@@ -0,0 +1,98 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_sentencepiece_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_udop": ["UDOP_PRETRAINED_CONFIG_ARCHIVE_MAP", "UdopConfig"],
28
+ "processing_udop": ["UdopProcessor"],
29
+ }
30
+
31
+ try:
32
+ if not is_sentencepiece_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["tokenization_udop"] = ["UdopTokenizer"]
38
+
39
+ try:
40
+ if not is_tokenizers_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["tokenization_udop_fast"] = ["UdopTokenizerFast"]
46
+
47
+ try:
48
+ if not is_torch_available():
49
+ raise OptionalDependencyNotAvailable()
50
+ except OptionalDependencyNotAvailable:
51
+ pass
52
+ else:
53
+ _import_structure["modeling_udop"] = [
54
+ "UDOP_PRETRAINED_MODEL_ARCHIVE_LIST",
55
+ "UdopForConditionalGeneration",
56
+ "UdopPreTrainedModel",
57
+ "UdopModel",
58
+ "UdopEncoderModel",
59
+ ]
60
+
61
+ if TYPE_CHECKING:
62
+ from .configuration_udop import UDOP_PRETRAINED_CONFIG_ARCHIVE_MAP, UdopConfig
63
+ from .processing_udop import UdopProcessor
64
+
65
+ try:
66
+ if not is_sentencepiece_available():
67
+ raise OptionalDependencyNotAvailable()
68
+ except OptionalDependencyNotAvailable:
69
+ pass
70
+ else:
71
+ from .tokenization_udop import UdopTokenizer
72
+
73
+ try:
74
+ if not is_tokenizers_available():
75
+ raise OptionalDependencyNotAvailable()
76
+ except OptionalDependencyNotAvailable:
77
+ pass
78
+ else:
79
+ from .tokenization_udop_fast import UdopTokenizerFast
80
+
81
+ try:
82
+ if not is_torch_available():
83
+ raise OptionalDependencyNotAvailable()
84
+ except OptionalDependencyNotAvailable:
85
+ pass
86
+ else:
87
+ from .modeling_udop import (
88
+ UDOP_PRETRAINED_MODEL_ARCHIVE_LIST,
89
+ UdopEncoderModel,
90
+ UdopForConditionalGeneration,
91
+ UdopModel,
92
+ UdopPreTrainedModel,
93
+ )
94
+
95
+ else:
96
+ import sys
97
+
98
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
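+ # Usage sketch (illustrative; assumes the optional sentencepiece/tokenizers/torch dependencies are installed):
+ #
+ #     from transformers import UdopProcessor, UdopForConditionalGeneration
+ #
+ # Thanks to the `_LazyModule` wiring above, the heavy submodules are only imported when such attributes are
+ # first accessed, not when `transformers.models.udop` itself is imported.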