applied-ai-018 committed
Commit 04bde8b · verified · 1 Parent(s): 6015cf9

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/bit/__init__.py +73 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/bit/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/bit/__pycache__/configuration_bit.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/bit/__pycache__/convert_bit_to_pytorch.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/bit/__pycache__/image_processing_bit.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/bit/__pycache__/modeling_bit.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/bit/configuration_bit.py +136 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/bit/convert_bit_to_pytorch.py +178 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/bit/image_processing_bit.py +345 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/bit/modeling_bit.py +898 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/__init__.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/configuration_camembert.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/modeling_camembert.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/modeling_tf_camembert.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/tokenization_camembert.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/tokenization_camembert_fast.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/depth_anything/__init__.py +56 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/depth_anything/__pycache__/__init__.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/depth_anything/__pycache__/configuration_depth_anything.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/depth_anything/__pycache__/convert_depth_anything_to_hf.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/depth_anything/__pycache__/modeling_depth_anything.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/depth_anything/configuration_depth_anything.py +145 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/depth_anything/convert_depth_anything_to_hf.py +299 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/depth_anything/modeling_depth_anything.py +463 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/electra/__init__.py +168 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/electra/__pycache__/__init__.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/electra/__pycache__/configuration_electra.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/electra/__pycache__/convert_electra_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/electra/__pycache__/modeling_electra.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/electra/__pycache__/modeling_flax_electra.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/electra/__pycache__/modeling_tf_electra.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/electra/__pycache__/tokenization_electra.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/electra/__pycache__/tokenization_electra_fast.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/electra/configuration_electra.py +187 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py +80 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/electra/modeling_electra.py +1679 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/electra/modeling_flax_electra.py +1601 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/electra/modeling_tf_electra.py +1768 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/electra/tokenization_electra.py +503 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/electra/tokenization_electra_fast.py +169 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py +330 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2_bert/__init__.py +70 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2_bert/__pycache__/__init__.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2_bert/__pycache__/configuration_wav2vec2_bert.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2_bert/__pycache__/convert_wav2vec2_seamless_checkpoint.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2_bert/__pycache__/modeling_wav2vec2_bert.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2_bert/__pycache__/processing_wav2vec2_bert.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py +314 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2_bert/convert_wav2vec2_seamless_checkpoint.py +218 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py +1671 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/bit/__init__.py ADDED
@@ -0,0 +1,73 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {"configuration_bit": ["BIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BitConfig", "BitOnnxConfig"]}
20
+
21
+ try:
22
+ if not is_torch_available():
23
+ raise OptionalDependencyNotAvailable()
24
+ except OptionalDependencyNotAvailable:
25
+ pass
26
+ else:
27
+ _import_structure["modeling_bit"] = [
28
+ "BIT_PRETRAINED_MODEL_ARCHIVE_LIST",
29
+ "BitForImageClassification",
30
+ "BitModel",
31
+ "BitPreTrainedModel",
32
+ "BitBackbone",
33
+ ]
34
+
35
+
36
+ try:
37
+ if not is_vision_available():
38
+ raise OptionalDependencyNotAvailable()
39
+ except OptionalDependencyNotAvailable:
40
+ pass
41
+ else:
42
+ _import_structure["image_processing_bit"] = ["BitImageProcessor"]
43
+
44
+
45
+ if TYPE_CHECKING:
46
+ from .configuration_bit import BIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BitConfig, BitOnnxConfig
47
+
48
+ try:
49
+ if not is_torch_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ from .modeling_bit import (
55
+ BIT_PRETRAINED_MODEL_ARCHIVE_LIST,
56
+ BitBackbone,
57
+ BitForImageClassification,
58
+ BitModel,
59
+ BitPreTrainedModel,
60
+ )
61
+
62
+ try:
63
+ if not is_vision_available():
64
+ raise OptionalDependencyNotAvailable()
65
+ except OptionalDependencyNotAvailable:
66
+ pass
67
+ else:
68
+ from .image_processing_bit import BitImageProcessor
69
+
70
+ else:
71
+ import sys
72
+
73
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
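
As a reading aid for this diff: the `_LazyModule` registration above means the torch- and vision-backed submodules are only imported when one of the exported names is first touched. A minimal sketch of that behaviour, assuming `transformers` (with this BiT code) and `torch` are installed:

```python
# Minimal sketch (not part of the diff): names listed in _import_structure are
# resolved lazily on first attribute access through the _LazyModule above.
import transformers.models.bit as bit  # registers the _LazyModule, nothing heavy yet

config = bit.BitConfig()      # resolving BitConfig only loads configuration_bit
model = bit.BitModel(config)  # resolving BitModel loads modeling_bit (needs torch)
print(type(model).__name__)   # "BitModel"
```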
llmeval-env/lib/python3.10/site-packages/transformers/models/bit/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.15 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bit/__pycache__/configuration_bit.cpython-310.pyc ADDED
Binary file (5.58 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bit/__pycache__/convert_bit_to_pytorch.cpython-310.pyc ADDED
Binary file (4.51 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bit/__pycache__/image_processing_bit.cpython-310.pyc ADDED
Binary file (13.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bit/__pycache__/modeling_bit.cpython-310.pyc ADDED
Binary file (23.8 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bit/configuration_bit.py ADDED
@@ -0,0 +1,136 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ BiT model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+ from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ from ..deprecated._archive_maps import BIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
26
+
27
+
28
+ class BitConfig(BackboneConfigMixin, PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`BitModel`]. It is used to instantiate an BiT
31
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
32
+ defaults will yield a similar configuration to that of the BiT
33
+ [google/bit-50](https://huggingface.co/google/bit-50) architecture.
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+ Args:
39
+ num_channels (`int`, *optional*, defaults to 3):
40
+ The number of input channels.
41
+ embedding_size (`int`, *optional*, defaults to 64):
42
+ Dimensionality (hidden size) for the embedding layer.
43
+ hidden_sizes (`List[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`):
44
+ Dimensionality (hidden size) at each stage.
45
+ depths (`List[int]`, *optional*, defaults to `[3, 4, 6, 3]`):
46
+ Depth (number of layers) for each stage.
47
+ layer_type (`str`, *optional*, defaults to `"preactivation"`):
48
+ The layer to use, it can be either `"preactivation"` or `"bottleneck"`.
49
+ hidden_act (`str`, *optional*, defaults to `"relu"`):
50
+ The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"`
51
+ are supported.
52
+ global_padding (`str`, *optional*):
53
+ Padding strategy to use for the convolutional layers. Can be either `"valid"`, `"same"`, or `None`.
54
+ num_groups (`int`, *optional*, defaults to 32):
55
+ Number of groups used for the `BitGroupNormActivation` layers.
56
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
57
+ The drop path rate for the stochastic depth.
58
+ embedding_dynamic_padding (`bool`, *optional*, defaults to `False`):
59
+ Whether or not to make use of dynamic padding for the embedding layer.
60
+ output_stride (`int`, *optional*, defaults to 32):
61
+ The output stride of the model.
62
+ width_factor (`int`, *optional*, defaults to 1):
63
+ The width factor for the model.
64
+ out_features (`List[str]`, *optional*):
65
+ If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
66
+ (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
67
+ corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
68
+ same order as defined in the `stage_names` attribute.
69
+ out_indices (`List[int]`, *optional*):
70
+ If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
71
+ many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
72
+ If unset and `out_features` is unset, will default to the last stage. Must be in the
73
+ same order as defined in the `stage_names` attribute.
74
+
75
+ Example:
76
+ ```python
77
+ >>> from transformers import BitConfig, BitModel
78
+
79
+ >>> # Initializing a BiT bit-50 style configuration
80
+ >>> configuration = BitConfig()
81
+
82
+ >>> # Initializing a model (with random weights) from the bit-50 style configuration
83
+ >>> model = BitModel(configuration)
84
+
85
+ >>> # Accessing the model configuration
86
+ >>> configuration = model.config
87
+ ```
88
+ """
89
+
90
+ model_type = "bit"
91
+ layer_types = ["preactivation", "bottleneck"]
92
+ supported_padding = ["SAME", "VALID"]
93
+
94
+ def __init__(
95
+ self,
96
+ num_channels=3,
97
+ embedding_size=64,
98
+ hidden_sizes=[256, 512, 1024, 2048],
99
+ depths=[3, 4, 6, 3],
100
+ layer_type="preactivation",
101
+ hidden_act="relu",
102
+ global_padding=None,
103
+ num_groups=32,
104
+ drop_path_rate=0.0,
105
+ embedding_dynamic_padding=False,
106
+ output_stride=32,
107
+ width_factor=1,
108
+ out_features=None,
109
+ out_indices=None,
110
+ **kwargs,
111
+ ):
112
+ super().__init__(**kwargs)
113
+ if layer_type not in self.layer_types:
114
+ raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
115
+ if global_padding is not None:
116
+ if global_padding.upper() in self.supported_padding:
117
+ global_padding = global_padding.upper()
118
+ else:
119
+ raise ValueError(f"Padding strategy {global_padding} not supported")
120
+ self.num_channels = num_channels
121
+ self.embedding_size = embedding_size
122
+ self.hidden_sizes = hidden_sizes
123
+ self.depths = depths
124
+ self.layer_type = layer_type
125
+ self.hidden_act = hidden_act
126
+ self.global_padding = global_padding
127
+ self.num_groups = num_groups
128
+ self.drop_path_rate = drop_path_rate
129
+ self.embedding_dynamic_padding = embedding_dynamic_padding
130
+ self.output_stride = output_stride
131
+ self.width_factor = width_factor
132
+
133
+ self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
134
+ self._out_features, self._out_indices = get_aligned_output_features_output_indices(
135
+ out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
136
+ )
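
The `out_features`/`out_indices` arguments documented above are aligned against `stage_names` by `get_aligned_output_features_output_indices`. A small illustrative sketch of the resulting attributes, assuming this `BitConfig` behaves as documented (the printed values are examples, not outputs from this commit):

```python
# Illustrative sketch (not part of the diff): using BitConfig as a backbone config.
from transformers import BitConfig

config = BitConfig(depths=[3, 4, 6, 3], out_features=["stage2", "stage4"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage4']
print(config.out_indices)   # indices aligned with stage_names, e.g. [2, 4]
```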
llmeval-env/lib/python3.10/site-packages/transformers/models/bit/convert_bit_to_pytorch.py ADDED
@@ -0,0 +1,178 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert BiT checkpoints from the timm library."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ from pathlib import Path
21
+
22
+ import requests
23
+ import torch
24
+ from huggingface_hub import hf_hub_download
25
+ from PIL import Image
26
+ from timm import create_model
27
+ from timm.data import resolve_data_config
28
+ from timm.data.transforms_factory import create_transform
29
+
30
+ from transformers import BitConfig, BitForImageClassification, BitImageProcessor
31
+ from transformers.image_utils import PILImageResampling
32
+ from transformers.utils import logging
33
+
34
+
35
+ logging.set_verbosity_info()
36
+ logger = logging.get_logger(__name__)
37
+
38
+
39
+ def get_config(model_name):
40
+ repo_id = "huggingface/label-files"
41
+ filename = "imagenet-1k-id2label.json"
42
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
43
+ id2label = {int(k): v for k, v in id2label.items()}
44
+ label2id = {v: k for k, v in id2label.items()}
45
+
46
+ conv_layer = "std_conv" if "bit" in model_name else False
47
+
48
+ # note that when using BiT as backbone for ViT-hybrid checkpoints,
49
+ # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
50
+ # config.conv_layer = "std_conv_same"
51
+ config = BitConfig(
52
+ conv_layer=conv_layer,
53
+ num_labels=1000,
54
+ id2label=id2label,
55
+ label2id=label2id,
56
+ )
57
+
58
+ return config
59
+
60
+
61
+ def rename_key(name):
62
+ if "stem.conv" in name:
63
+ name = name.replace("stem.conv", "bit.embedder.convolution")
64
+ if "blocks" in name:
65
+ name = name.replace("blocks", "layers")
66
+ if "head.fc" in name:
67
+ name = name.replace("head.fc", "classifier.1")
68
+ if name.startswith("norm"):
69
+ name = "bit." + name
70
+ if "bit" not in name and "classifier" not in name:
71
+ name = "bit.encoder." + name
72
+
73
+ return name
74
+
75
+
76
+ # We will verify our results on an image of cute cats
77
+ def prepare_img():
78
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
79
+ im = Image.open(requests.get(url, stream=True).raw)
80
+ return im
81
+
82
+
83
+ @torch.no_grad()
84
+ def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
85
+ """
86
+ Copy/paste/tweak model's weights to our BiT structure.
87
+ """
88
+
89
+ # define default BiT configuration
90
+ config = get_config(model_name)
91
+
92
+ # load original model from timm
93
+ timm_model = create_model(model_name, pretrained=True)
94
+ timm_model.eval()
95
+
96
+ # load state_dict of original model
97
+ state_dict = timm_model.state_dict()
98
+ for key in state_dict.copy().keys():
99
+ val = state_dict.pop(key)
100
+ state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
101
+
102
+ # load HuggingFace model
103
+ model = BitForImageClassification(config)
104
+ model.eval()
105
+ model.load_state_dict(state_dict)
106
+
107
+ # create image processor
108
+ transform = create_transform(**resolve_data_config({}, model=timm_model))
109
+ timm_transforms = transform.transforms
110
+
111
+ pillow_resamplings = {
112
+ "bilinear": PILImageResampling.BILINEAR,
113
+ "bicubic": PILImageResampling.BICUBIC,
114
+ "nearest": PILImageResampling.NEAREST,
115
+ }
116
+
117
+ processor = BitImageProcessor(
118
+ do_resize=True,
119
+ size={"shortest_edge": timm_transforms[0].size},
120
+ resample=pillow_resamplings[timm_transforms[0].interpolation.value],
121
+ do_center_crop=True,
122
+ crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
123
+ do_normalize=True,
124
+ image_mean=timm_transforms[-1].mean.tolist(),
125
+ image_std=timm_transforms[-1].std.tolist(),
126
+ )
127
+
128
+ image = prepare_img()
129
+ timm_pixel_values = transform(image).unsqueeze(0)
130
+ pixel_values = processor(image, return_tensors="pt").pixel_values
131
+
132
+ # verify pixel values
133
+ assert torch.allclose(timm_pixel_values, pixel_values)
134
+
135
+ # verify logits
136
+ with torch.no_grad():
137
+ outputs = model(pixel_values)
138
+ logits = outputs.logits
139
+
140
+ print("Logits:", logits[0, :3])
141
+ print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
142
+ timm_logits = timm_model(pixel_values)
143
+ assert timm_logits.shape == outputs.logits.shape
144
+ assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
145
+ print("Looks ok!")
146
+
147
+ if pytorch_dump_folder_path is not None:
148
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
149
+ print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
150
+ model.save_pretrained(pytorch_dump_folder_path)
151
+ processor.save_pretrained(pytorch_dump_folder_path)
152
+
153
+ if push_to_hub:
154
+ print(f"Pushing model {model_name} and processor to the hub")
155
+ model.push_to_hub(f"ybelkada/{model_name}")
156
+ processor.push_to_hub(f"ybelkada/{model_name}")
157
+
158
+
159
+ if __name__ == "__main__":
160
+ parser = argparse.ArgumentParser()
161
+ # Required parameters
162
+ parser.add_argument(
163
+ "--model_name",
164
+ default="resnetv2_50x1_bitm",
165
+ type=str,
166
+ help="Name of the BiT timm model you'd like to convert.",
167
+ )
168
+ parser.add_argument(
169
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
170
+ )
171
+ parser.add_argument(
172
+ "--push_to_hub",
173
+ action="store_true",
174
+ help="Whether to push the model to the hub.",
175
+ )
176
+
177
+ args = parser.parse_args()
178
+ convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
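
For reference, the converter above can also be driven without argparse by calling `convert_bit_checkpoint` directly. A hedged sketch, assuming `timm` is installed, network access is available, and the output folder name (hypothetical) is writable:

```python
# Hedged sketch (not part of the diff): programmatic use of the conversion entry point.
# Importing this module pulls in timm, so timm must be installed.
from transformers.models.bit.convert_bit_to_pytorch import convert_bit_checkpoint

convert_bit_checkpoint(
    model_name="resnetv2_50x1_bitm",             # the script's default timm checkpoint
    pytorch_dump_folder_path="./bit-converted",  # hypothetical output directory
    push_to_hub=False,
)
```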
llmeval-env/lib/python3.10/site-packages/transformers/models/bit/image_processing_bit.py ADDED
@@ -0,0 +1,345 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for BiT."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import (
23
+ convert_to_rgb,
24
+ get_resize_output_image_size,
25
+ resize,
26
+ to_channel_dimension_format,
27
+ )
28
+ from ...image_utils import (
29
+ OPENAI_CLIP_MEAN,
30
+ OPENAI_CLIP_STD,
31
+ ChannelDimension,
32
+ ImageInput,
33
+ PILImageResampling,
34
+ infer_channel_dimension_format,
35
+ is_scaled_image,
36
+ make_list_of_images,
37
+ to_numpy_array,
38
+ valid_images,
39
+ validate_kwargs,
40
+ validate_preprocess_arguments,
41
+ )
42
+ from ...utils import TensorType, is_vision_available, logging
43
+
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+
48
+ if is_vision_available():
49
+ import PIL
50
+
51
+
52
+ class BitImageProcessor(BaseImageProcessor):
53
+ r"""
54
+ Constructs a BiT image processor.
55
+
56
+ Args:
57
+ do_resize (`bool`, *optional*, defaults to `True`):
58
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
59
+ `do_resize` in the `preprocess` method.
60
+ size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
61
+ Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
62
+ the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
63
+ method.
64
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
65
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
66
+ do_center_crop (`bool`, *optional*, defaults to `True`):
67
+ Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
68
+ `preprocess` method.
69
+ crop_size (`Dict[str, int]`, *optional*, defaults to 224):
70
+ Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
71
+ method.
72
+ do_rescale (`bool`, *optional*, defaults to `True`):
73
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
74
+ the `preprocess` method.
75
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
76
+ Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
77
+ method.
78
+ do_normalize (`bool`, *optional*, defaults to `True`):
79
+ Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
80
+ image_mean (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
81
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
82
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
83
+ image_std (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
84
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
85
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
86
+ Can be overridden by the `image_std` parameter in the `preprocess` method.
87
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
88
+ Whether to convert the image to RGB.
89
+ """
90
+
91
+ model_input_names = ["pixel_values"]
92
+
93
+ def __init__(
94
+ self,
95
+ do_resize: bool = True,
96
+ size: Dict[str, int] = None,
97
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
98
+ do_center_crop: bool = True,
99
+ crop_size: Dict[str, int] = None,
100
+ do_rescale: bool = True,
101
+ rescale_factor: Union[int, float] = 1 / 255,
102
+ do_normalize: bool = True,
103
+ image_mean: Optional[Union[float, List[float]]] = None,
104
+ image_std: Optional[Union[float, List[float]]] = None,
105
+ do_convert_rgb: bool = True,
106
+ **kwargs,
107
+ ) -> None:
108
+ super().__init__(**kwargs)
109
+ size = size if size is not None else {"shortest_edge": 224}
110
+ size = get_size_dict(size, default_to_square=False)
111
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
112
+ crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
113
+
114
+ self.do_resize = do_resize
115
+ self.size = size
116
+ self.resample = resample
117
+ self.do_center_crop = do_center_crop
118
+ self.crop_size = crop_size
119
+ self.do_rescale = do_rescale
120
+ self.rescale_factor = rescale_factor
121
+ self.do_normalize = do_normalize
122
+ self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
123
+ self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
124
+ self.do_convert_rgb = do_convert_rgb
125
+ self._valid_processor_keys = [
126
+ "images",
127
+ "do_resize",
128
+ "size",
129
+ "resample",
130
+ "do_center_crop",
131
+ "crop_size",
132
+ "do_rescale",
133
+ "rescale_factor",
134
+ "do_normalize",
135
+ "image_mean",
136
+ "image_std",
137
+ "do_convert_rgb",
138
+ "return_tensors",
139
+ "data_format",
140
+ "input_data_format",
141
+ ]
142
+
143
+ # Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize
144
+ def resize(
145
+ self,
146
+ image: np.ndarray,
147
+ size: Dict[str, int],
148
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
149
+ data_format: Optional[Union[str, ChannelDimension]] = None,
150
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
151
+ **kwargs,
152
+ ) -> np.ndarray:
153
+ """
154
+ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
155
+ resized to keep the input aspect ratio.
156
+
157
+ Args:
158
+ image (`np.ndarray`):
159
+ Image to resize.
160
+ size (`Dict[str, int]`):
161
+ Size of the output image.
162
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
163
+ Resampling filter to use when resizing the image.
164
+ data_format (`str` or `ChannelDimension`, *optional*):
165
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
166
+ input_data_format (`ChannelDimension` or `str`, *optional*):
167
+ The channel dimension format of the input image. If not provided, it will be inferred.
168
+ """
169
+ default_to_square = True
170
+ if "shortest_edge" in size:
171
+ size = size["shortest_edge"]
172
+ default_to_square = False
173
+ elif "height" in size and "width" in size:
174
+ size = (size["height"], size["width"])
175
+ else:
176
+ raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
177
+
178
+ output_size = get_resize_output_image_size(
179
+ image,
180
+ size=size,
181
+ default_to_square=default_to_square,
182
+ input_data_format=input_data_format,
183
+ )
184
+ return resize(
185
+ image,
186
+ size=output_size,
187
+ resample=resample,
188
+ data_format=data_format,
189
+ input_data_format=input_data_format,
190
+ **kwargs,
191
+ )
192
+
193
+ def preprocess(
194
+ self,
195
+ images: ImageInput,
196
+ do_resize: bool = None,
197
+ size: Dict[str, int] = None,
198
+ resample: PILImageResampling = None,
199
+ do_center_crop: bool = None,
200
+ crop_size: int = None,
201
+ do_rescale: bool = None,
202
+ rescale_factor: float = None,
203
+ do_normalize: bool = None,
204
+ image_mean: Optional[Union[float, List[float]]] = None,
205
+ image_std: Optional[Union[float, List[float]]] = None,
206
+ do_convert_rgb: bool = None,
207
+ return_tensors: Optional[Union[str, TensorType]] = None,
208
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
209
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
210
+ **kwargs,
211
+ ) -> PIL.Image.Image:
212
+ """
213
+ Preprocess an image or batch of images.
214
+
215
+ Args:
216
+ images (`ImageInput`):
217
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
218
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
219
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
220
+ Whether to resize the image.
221
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
222
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
223
+ the longest edge resized to keep the input aspect ratio.
224
+ resample (`int`, *optional*, defaults to `self.resample`):
225
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
226
+ has an effect if `do_resize` is set to `True`.
227
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
228
+ Whether to center crop the image.
229
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
230
+ Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
231
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
232
+ Whether to rescale the image.
233
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
234
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
235
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
236
+ Whether to normalize the image.
237
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
238
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
239
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
240
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
241
+ `True`.
242
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
243
+ Whether to convert the image to RGB.
244
+ return_tensors (`str` or `TensorType`, *optional*):
245
+ The type of tensors to return. Can be one of:
246
+ - Unset: Return a list of `np.ndarray`.
247
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
248
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
249
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
250
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
251
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
252
+ The channel dimension format for the output image. Can be one of:
253
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
254
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
255
+ - Unset: Use the channel dimension format of the input image.
256
+ input_data_format (`ChannelDimension` or `str`, *optional*):
257
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
258
+ from the input image. Can be one of:
259
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
260
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
261
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
262
+ """
263
+ do_resize = do_resize if do_resize is not None else self.do_resize
264
+ size = size if size is not None else self.size
265
+ size = get_size_dict(size, param_name="size", default_to_square=False)
266
+ resample = resample if resample is not None else self.resample
267
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
268
+ crop_size = crop_size if crop_size is not None else self.crop_size
269
+ crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
270
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
271
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
272
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
273
+ image_mean = image_mean if image_mean is not None else self.image_mean
274
+ image_std = image_std if image_std is not None else self.image_std
275
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
276
+
277
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
278
+
279
+ images = make_list_of_images(images)
280
+
281
+ if not valid_images(images):
282
+ raise ValueError(
283
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
284
+ "torch.Tensor, tf.Tensor or jax.ndarray."
285
+ )
286
+
287
+ validate_preprocess_arguments(
288
+ do_rescale=do_rescale,
289
+ rescale_factor=rescale_factor,
290
+ do_normalize=do_normalize,
291
+ image_mean=image_mean,
292
+ image_std=image_std,
293
+ do_center_crop=do_center_crop,
294
+ crop_size=crop_size,
295
+ do_resize=do_resize,
296
+ size=size,
297
+ resample=resample,
298
+ )
299
+
300
+ # PIL RGBA images are converted to RGB
301
+ if do_convert_rgb:
302
+ images = [convert_to_rgb(image) for image in images]
303
+
304
+ # All transformations expect numpy arrays.
305
+ images = [to_numpy_array(image) for image in images]
306
+
307
+ if is_scaled_image(images[0]) and do_rescale:
308
+ logger.warning_once(
309
+ "It looks like you are trying to rescale already rescaled images. If the input"
310
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
311
+ )
312
+
313
+ if input_data_format is None:
314
+ # We assume that all images have the same channel dimension format.
315
+ input_data_format = infer_channel_dimension_format(images[0])
316
+
317
+ if do_resize:
318
+ images = [
319
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
320
+ for image in images
321
+ ]
322
+
323
+ if do_center_crop:
324
+ images = [
325
+ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
326
+ ]
327
+
328
+ if do_rescale:
329
+ images = [
330
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
331
+ for image in images
332
+ ]
333
+
334
+ if do_normalize:
335
+ images = [
336
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
337
+ for image in images
338
+ ]
339
+
340
+ images = [
341
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
342
+ ]
343
+
344
+ data = {"pixel_values": images}
345
+ return BatchFeature(data=data, tensor_type=return_tensors)
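
The `preprocess` pipeline above applies, in order: optional RGB conversion, shortest-edge resize, center crop, rescale, normalization, and channel reordering. A minimal sketch of the default path, assuming Pillow, `requests`, and `torch` are available (the URL is the same COCO test image used by the conversion script in this commit):

```python
# Minimal sketch (not part of the diff): default BitImageProcessor pipeline.
import requests
from PIL import Image
from transformers import BitImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = BitImageProcessor()  # shortest_edge=224, 224x224 crop, CLIP mean/std
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```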
llmeval-env/lib/python3.10/site-packages/transformers/models/bit/modeling_bit.py ADDED
@@ -0,0 +1,898 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Google AI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch BiT model. Also supports backbone for ViT hybrid."""
16
+
17
+ import collections
18
+ import math
19
+ from typing import Optional, Tuple
20
+
21
+ import numpy as np
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import Tensor, nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import (
29
+ BackboneOutput,
30
+ BaseModelOutputWithNoAttention,
31
+ BaseModelOutputWithPoolingAndNoAttention,
32
+ ImageClassifierOutputWithNoAttention,
33
+ )
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...utils import (
36
+ add_code_sample_docstrings,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ logging,
40
+ replace_return_docstrings,
41
+ )
42
+ from ...utils.backbone_utils import BackboneMixin
43
+ from .configuration_bit import BitConfig
44
+
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+ # General docstring
49
+ _CONFIG_FOR_DOC = "BitConfig"
50
+
51
+ # Base docstring
52
+ _CHECKPOINT_FOR_DOC = "google/bit-50"
53
+ _EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]
54
+
55
+ # Image classification docstring
56
+ _IMAGE_CLASS_CHECKPOINT = "google/bit-50"
57
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"
58
+
59
+
60
+ from ..deprecated._archive_maps import BIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
61
+
62
+
63
+ def get_padding_value(padding=None, kernel_size=7, stride=1, dilation=1) -> Tuple[Tuple, bool]:
64
+ r"""
65
+ Utility function to get the tuple padding value given the kernel_size and padding.
66
+
67
+ Args:
68
+ padding (Union[`str`, `int`], *optional*):
69
+ Padding value, can be either `"same"`, `"valid"`. If a different value is provided the default padding from
70
+ PyTorch is used.
71
+ kernel_size (`int`, *optional*, defaults to 7):
72
+ Kernel size of the convolution layers.
73
+ stride (`int`, *optional*, defaults to 1):
74
+ Stride value of the convolution layers.
75
+ dilation (`int`, *optional*, defaults to 1):
76
+ Dilation value of the convolution layers.
77
+ """
78
+ dynamic = False
79
+ if padding is None:
80
+ padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
81
+ return padding, dynamic
82
+
83
+ if isinstance(padding, str):
84
+ # for any string padding, the padding will be calculated for you, one of three ways
85
+ padding = padding.lower()
86
+ if padding == "same":
87
+ # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
88
+ if stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0:
89
+ # static case, no extra overhead
90
+ padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
91
+ else:
92
+ # dynamic 'SAME' padding, has runtime/GPU memory overhead
93
+ padding = 0
94
+ dynamic = True
95
+ elif padding == "valid":
96
+ # 'VALID' padding, same as padding=0
97
+ padding = 0
98
+ else:
99
+ # Default to PyTorch style 'same'-ish symmetric padding
100
+ padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
101
+ return padding, dynamic
102
+
103
+
104
+ class WeightStandardizedConv2d(nn.Conv2d):
105
+ """Conv2d with Weight Standardization. Includes TensorFlow compatible SAME padding. Used for ViT Hybrid model.
106
+
107
+ Paper: [Micro-Batch Training with Batch-Channel Normalization and Weight
108
+ Standardization](https://arxiv.org/abs/1903.10520v2)
109
+ """
110
+
111
+ def __init__(
112
+ self,
113
+ in_channel,
114
+ out_channels,
115
+ kernel_size,
116
+ stride=1,
117
+ padding="SAME",
118
+ dilation=1,
119
+ groups=1,
120
+ bias=False,
121
+ eps=1e-6,
122
+ ):
123
+ padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation)
124
+ super().__init__(
125
+ in_channel,
126
+ out_channels,
127
+ kernel_size,
128
+ stride=stride,
129
+ padding=padding,
130
+ dilation=dilation,
131
+ groups=groups,
132
+ bias=bias,
133
+ )
134
+ if is_dynamic:
135
+ self.pad = DynamicPad2d(kernel_size, stride, dilation)
136
+ else:
137
+ self.pad = None
138
+ self.eps = eps
139
+
140
+ def forward(self, hidden_state):
141
+ if self.pad is not None:
142
+ hidden_state = self.pad(hidden_state)
143
+ weight = nn.functional.batch_norm(
144
+ self.weight.reshape(1, self.out_channels, -1), None, None, training=True, momentum=0.0, eps=self.eps
145
+ ).reshape_as(self.weight)
146
+ hidden_state = nn.functional.conv2d(
147
+ hidden_state, weight, self.bias, self.stride, self.padding, self.dilation, self.groups
148
+ )
149
+ return hidden_state
150
+
151
+
152
+ class BitGroupNormActivation(nn.GroupNorm):
153
+ r"""
154
+ A module that combines group normalization with an activation function.
155
+ """
156
+
157
+ def __init__(self, config, num_channels, eps=1e-5, affine=True, apply_activation=True):
158
+ super(BitGroupNormActivation, self).__init__(config.num_groups, num_channels, eps=eps, affine=affine)
159
+ if apply_activation:
160
+ self.activation = ACT2FN[config.hidden_act]
161
+ else:
162
+ self.activation = nn.Identity()
163
+
164
+ def forward(self, hidden_state):
165
+ hidden_state = nn.functional.group_norm(hidden_state, self.num_groups, self.weight, self.bias, self.eps)
166
+ hidden_state = self.activation(hidden_state)
167
+ return hidden_state
168
+
169
+
170
+ class DynamicPad2d(nn.Module):
171
+ r"""
172
+ A module that wraps dynamic padding of any input, given the parameters of the convolutional layer and the input
173
+ hidden states.
174
+ """
175
+
176
+ def __init__(self, kernel_size, stride, dilation, value=0):
177
+ super().__init__()
178
+ # Safety checkers
179
+ if isinstance(kernel_size, int):
180
+ kernel_size = (kernel_size, kernel_size)
181
+
182
+ if isinstance(stride, int):
183
+ stride = (stride, stride)
184
+
185
+ if isinstance(dilation, int):
186
+ dilation = (dilation, dilation)
187
+
188
+ self.kernel_size = kernel_size
189
+ self.stride = stride
190
+ self.dilation = dilation
191
+ self.value = value
192
+
193
+ def compute_padding(x, kernel_size, stride, dilation):
194
+ return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0)
195
+
196
+ self.compute_padding = compute_padding
197
+
198
+ def __call__(self, input):
199
+ # Get width and height
200
+ input_height, input_width = input.size()[-2:]
201
+
202
+ # Compute the padding values
203
+ padding_height = self.compute_padding(input_height, self.kernel_size[0], self.stride[0], self.dilation[0])
204
+ padding_width = self.compute_padding(input_width, self.kernel_size[1], self.stride[1], self.dilation[1])
205
+
206
+ # apply pad
207
+ if padding_height > 0 or padding_width > 0:
208
+ input = nn.functional.pad(
209
+ input,
210
+ [
211
+ padding_width // 2,
212
+ padding_width - padding_width // 2,
213
+ padding_height // 2,
214
+ padding_height - padding_height // 2,
215
+ ],
216
+ value=self.value,
217
+ )
218
+ return input
219
+
220
+
221
+ class BitMaxPool2d(nn.MaxPool2d):
222
+ """Tensorflow like 'SAME' wrapper for 2D max pooling"""
223
+
224
+ def __init__(
225
+ self,
226
+ kernel_size: int,
227
+ stride=None,
228
+ dilation=1,
229
+ ceil_mode=False,
230
+ padding=(0, 0),
231
+ padding_value=0,
232
+ use_dynamic_padding=True,
233
+ ):
234
+ kernel_size = kernel_size if isinstance(kernel_size, collections.abc.Iterable) else (kernel_size, kernel_size)
235
+ stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
236
+ dilation = dilation if isinstance(dilation, collections.abc.Iterable) else (dilation, dilation)
237
+ super().__init__(kernel_size, stride, padding, dilation, ceil_mode)
238
+ if use_dynamic_padding:
239
+ self.pad = DynamicPad2d(kernel_size, stride, dilation, padding_value)
240
+ else:
241
+ self.pad = nn.Identity()
242
+
243
+ def forward(self, hidden_states):
244
+ hidden_states = self.pad(hidden_states)
245
+ return nn.functional.max_pool2d(
246
+ hidden_states, self.kernel_size, self.stride, self.padding, self.dilation, self.ceil_mode
247
+ )
248
+
249
+
250
+ class BitEmbeddings(nn.Module):
251
+ """
252
+ BiT Embeddings (stem) composed of a single aggressive convolution.
253
+ """
254
+
255
+ def __init__(self, config: BitConfig):
256
+ super().__init__()
257
+
258
+ self.convolution = WeightStandardizedConv2d(
259
+ config.num_channels,
260
+ config.embedding_size,
261
+ kernel_size=7,
262
+ stride=2,
263
+ eps=1e-8,
264
+ padding=config.global_padding,
265
+ )
266
+
267
+ self.pooler = BitMaxPool2d(kernel_size=3, stride=2, use_dynamic_padding=config.embedding_dynamic_padding)
268
+
269
+ # Use the same padding strategy as convolutional layers
270
+ if config.global_padding is not None and config.global_padding.upper() == "SAME":
271
+ self.pad = nn.Identity()
272
+ else:
273
+ self.pad = nn.ConstantPad2d(padding=(1, 1, 1, 1), value=0.0)
274
+
275
+ if not config.layer_type == "preactivation":
276
+ self.norm = BitGroupNormActivation(config, num_channels=config.embedding_size)
277
+ else:
278
+ self.norm = nn.Identity()
279
+
280
+ self.num_channels = config.num_channels
281
+
282
+ def forward(self, pixel_values: Tensor) -> Tensor:
283
+ num_channels = pixel_values.shape[1]
284
+ if num_channels != self.num_channels:
285
+ raise ValueError(
286
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
287
+ )
288
+
289
+ embedding = self.convolution(pixel_values)
290
+
291
+ embedding = self.pad(embedding)
292
+
293
+ embedding = self.norm(embedding)
294
+
295
+ embedding = self.pooler(embedding)
296
+
297
+ return embedding
298
+
299
+
300
+ # Copied from transformers.models.convnext.modeling_convnext.drop_path
301
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
302
+ """
303
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
304
+
305
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
306
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
307
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
308
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
309
+ argument.
310
+ """
311
+ if drop_prob == 0.0 or not training:
312
+ return input
313
+ keep_prob = 1 - drop_prob
314
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
315
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
316
+ random_tensor.floor_() # binarize
317
+ output = input.div(keep_prob) * random_tensor
318
+ return output
319
+
320
+
321
+ # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Bit
322
+ class BitDropPath(nn.Module):
323
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
324
+
325
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
326
+ super().__init__()
327
+ self.drop_prob = drop_prob
328
+
329
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
330
+ return drop_path(hidden_states, self.drop_prob, self.training)
331
+
332
+ def extra_repr(self) -> str:
333
+ return "p={}".format(self.drop_prob)
334
+
335
+
336
+ def make_div(value, divisor=8):
337
+ min_value = divisor
338
+ new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
339
+ if new_value < 0.9 * value:
340
+ new_value += divisor
341
+ return new_value
342
+
343
+
344
+ class BitPreActivationBottleneckLayer(nn.Module):
345
+ """Pre-activation (v2) bottleneck block.
346
+ Follows the implementation of "Identity Mappings in Deep Residual Networks":
347
+ https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
348
+
349
+ Except it puts the stride on 3x3 conv when available.
350
+ """
351
+
352
+ def __init__(
353
+ self,
354
+ config,
355
+ in_channels,
356
+ out_channels=None,
357
+ bottle_ratio=0.25,
358
+ stride=1,
359
+ dilation=1,
360
+ first_dilation=None,
361
+ groups=1,
362
+ drop_path_rate=0.0,
363
+ is_first_layer=False,
364
+ ):
365
+ super().__init__()
366
+
367
+ first_dilation = first_dilation or dilation
368
+
369
+ out_channels = out_channels or in_channels
370
+ mid_channels = make_div(out_channels * bottle_ratio)
371
+
372
+ if is_first_layer:
373
+ self.downsample = BitDownsampleConv(
374
+ config,
375
+ in_channels,
376
+ out_channels,
377
+ stride=stride,
378
+ preact=True,
379
+ )
380
+ else:
381
+ self.downsample = None
382
+
383
+ self.norm1 = BitGroupNormActivation(config, in_channels)
384
+ self.conv1 = WeightStandardizedConv2d(in_channels, mid_channels, 1, eps=1e-8, padding=config.global_padding)
385
+
386
+ self.norm2 = BitGroupNormActivation(config, num_channels=mid_channels)
387
+ self.conv2 = WeightStandardizedConv2d(
388
+ mid_channels, mid_channels, 3, stride=stride, groups=groups, eps=1e-8, padding=config.global_padding
389
+ )
390
+
391
+ self.norm3 = BitGroupNormActivation(config, mid_channels)
392
+ self.conv3 = WeightStandardizedConv2d(mid_channels, out_channels, 1, eps=1e-8, padding=config.global_padding)
393
+
394
+ self.drop_path = BitDropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
395
+
396
+ def forward(self, hidden_states):
397
+ hidden_states_preact = self.norm1(hidden_states)
398
+
399
+ # shortcut branch
400
+ shortcut = hidden_states
401
+ if self.downsample is not None:
402
+ shortcut = self.downsample(hidden_states_preact)
403
+
404
+ # residual branch
405
+ hidden_states = self.conv1(hidden_states_preact)
406
+ hidden_states = self.conv2(self.norm2(hidden_states))
407
+ hidden_states = self.conv3(self.norm3(hidden_states))
408
+ hidden_states = self.drop_path(hidden_states)
409
+ return hidden_states + shortcut
410
+
411
+
412
+ class BitBottleneckLayer(nn.Module):
413
+ """Non Pre-activation bottleneck block, equivalent to V1.5/V1b bottleneck. Used for ViT Hybrid."""
414
+
415
+ def __init__(
416
+ self,
417
+ config,
418
+ in_channels,
419
+ out_channels=None,
420
+ bottle_ratio=0.25,
421
+ stride=1,
422
+ dilation=1,
423
+ first_dilation=None,
424
+ groups=1,
425
+ drop_path_rate=0.0,
426
+ is_first_layer=False,
427
+ ):
428
+ super().__init__()
429
+ first_dilation = first_dilation or dilation
430
+
431
+ out_channels = out_channels or in_channels
432
+ mid_chs = make_div(out_channels * bottle_ratio)
433
+
434
+ if is_first_layer:
435
+ self.downsample = BitDownsampleConv(
436
+ config,
437
+ in_channels,
438
+ out_channels,
439
+ stride=stride,
440
+ preact=False,
441
+ )
442
+ else:
443
+ self.downsample = None
444
+
445
+ self.conv1 = WeightStandardizedConv2d(in_channels, mid_chs, 1, eps=1e-8, padding=config.global_padding)
446
+ self.norm1 = BitGroupNormActivation(config, num_channels=mid_chs)
447
+ self.conv2 = WeightStandardizedConv2d(
448
+ mid_chs,
449
+ mid_chs,
450
+ 3,
451
+ stride=stride,
452
+ dilation=first_dilation,
453
+ groups=groups,
454
+ eps=1e-8,
455
+ padding=config.global_padding,
456
+ )
457
+ self.norm2 = BitGroupNormActivation(config, num_channels=mid_chs)
458
+ self.conv3 = WeightStandardizedConv2d(mid_chs, out_channels, 1, eps=1e-8, padding=config.global_padding)
459
+ self.norm3 = BitGroupNormActivation(config, num_channels=out_channels, apply_activation=False)
460
+ self.drop_path = BitDropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
461
+
462
+ self.activation = ACT2FN[config.hidden_act]
463
+
464
+ def forward(self, hidden_states):
465
+ # shortcut branch
466
+ shortcut = hidden_states
467
+ if self.downsample is not None:
468
+ shortcut = self.downsample(hidden_states)
469
+
470
+ # residual
471
+ hidden_states = self.conv1(hidden_states)
472
+ hidden_states = self.norm1(hidden_states)
473
+
474
+ hidden_states = self.conv2(hidden_states)
475
+ hidden_states = self.norm2(hidden_states)
476
+
477
+ hidden_states = self.conv3(hidden_states)
478
+ hidden_states = self.norm3(hidden_states)
479
+
480
+ hidden_states = self.drop_path(hidden_states)
481
+ hidden_states = self.activation(hidden_states + shortcut)
482
+ return hidden_states
483
+
484
+
485
+ class BitDownsampleConv(nn.Module):
486
+ def __init__(
487
+ self,
488
+ config,
489
+ in_channels,
490
+ out_channels,
491
+ stride=1,
492
+ preact=True,
493
+ ):
494
+ super().__init__()
495
+ self.conv = WeightStandardizedConv2d(
496
+ in_channels, out_channels, 1, stride=stride, eps=1e-8, padding=config.global_padding
497
+ )
498
+ self.norm = (
499
+ nn.Identity()
500
+ if preact
501
+ else BitGroupNormActivation(config, num_channels=out_channels, apply_activation=False)
502
+ )
503
+
504
+ def forward(self, x):
505
+ return self.norm(self.conv(x))
506
+
507
+
508
+ class BitStage(nn.Module):
509
+ """
510
+ A ResNet v2 stage composed of stacked layers.
511
+ """
512
+
513
+ def __init__(
514
+ self,
515
+ config,
516
+ in_channels,
517
+ out_channels,
518
+ stride,
519
+ dilation,
520
+ depth,
521
+ bottle_ratio=0.25,
522
+ layer_dropout=None,
523
+ ):
524
+ super().__init__()
525
+
526
+ first_dilation = 1 if dilation in (1, 2) else 2
527
+
528
+ # Get the layer type
529
+ if config.layer_type == "bottleneck":
530
+ layer_cls = BitBottleneckLayer
531
+ else:
532
+ layer_cls = BitPreActivationBottleneckLayer
533
+
534
+ prev_chs = in_channels
535
+ self.layers = nn.Sequential()
536
+ for layer_idx in range(depth):
537
+ # Get the current hyper-parameters
538
+ stride, drop_path_rate, is_first_layer = self._get_updated_hyperparameters(
539
+ layer_idx, stride, layer_dropout
540
+ )
541
+
542
+ self.layers.add_module(
543
+ str(layer_idx),
544
+ layer_cls(
545
+ config,
546
+ prev_chs,
547
+ out_channels,
548
+ stride=stride,
549
+ dilation=dilation,
550
+ bottle_ratio=bottle_ratio,
551
+ first_dilation=first_dilation,
552
+ drop_path_rate=drop_path_rate,
553
+ is_first_layer=is_first_layer,
554
+ ),
555
+ )
556
+ prev_chs = out_channels
557
+ first_dilation = dilation
558
+
559
+ def _get_updated_hyperparameters(self, layer_idx, stride, layer_dropout):
560
+ r"""
561
+ Get the new hyper-parameters with respect to the previous ones and the index of the current layer.
562
+ """
563
+ if layer_dropout:
564
+ drop_path_rate = layer_dropout[layer_idx]
565
+ else:
566
+ drop_path_rate = 0.0
567
+
568
+ if layer_idx != 0:
569
+ stride = 1
570
+
571
+ is_first_layer = layer_idx == 0
572
+
573
+ return stride, drop_path_rate, is_first_layer
574
+
575
+ def forward(self, input: Tensor) -> Tensor:
576
+ hidden_state = input
577
+ for _, layer in enumerate(self.layers):
578
+ hidden_state = layer(hidden_state)
579
+ return hidden_state
580
+
581
+
582
+ class BitEncoder(nn.Module):
583
+ def __init__(self, config: BitConfig):
584
+ super().__init__()
585
+ self.stages = nn.ModuleList([])
586
+
587
+ prev_chs = config.embedding_size
588
+
589
+ # These need to stay hardcoded
590
+ current_stride = 4
591
+ dilation = 1
592
+
593
+ layer_dropouts = [
594
+ x.tolist()
595
+ for x in torch.Tensor(np.linspace(0, config.drop_path_rate, sum(config.depths))).split(config.depths)
596
+ ]
597
+
598
+ for stage_idx, (current_depth, current_hidden_size, layer_dropout) in enumerate(
599
+ zip(config.depths, config.hidden_sizes, layer_dropouts)
600
+ ):
601
+ # Get the updated hyper params
602
+ out_channels, stride, dilation = self._get_updated_hyperparameters(
603
+ stage_idx, current_stride, current_hidden_size, dilation, config
604
+ )
605
+
606
+ stage = BitStage(
607
+ config,
608
+ prev_chs,
609
+ out_channels,
610
+ stride=stride,
611
+ dilation=dilation,
612
+ depth=current_depth,
613
+ layer_dropout=layer_dropout,
614
+ )
615
+
616
+ prev_chs = out_channels
617
+ current_stride *= stride
618
+
619
+ self.stages.add_module(str(stage_idx), stage)
620
+
621
+ def _get_updated_hyperparameters(self, stage_idx, current_stride, current_hidden_size, dilation, config):
622
+ out_channels = make_div(current_hidden_size * config.width_factor)
623
+ stride = 1 if stage_idx == 0 else 2
624
+ if current_stride >= config.output_stride:
625
+ dilation *= stride
626
+ stride = 1
627
+ return out_channels, stride, dilation
628
+
629
+ def forward(
630
+ self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
631
+ ) -> BaseModelOutputWithNoAttention:
632
+ hidden_states = () if output_hidden_states else None
633
+
634
+ for stage_module in self.stages:
635
+ if output_hidden_states:
636
+ hidden_states = hidden_states + (hidden_state,)
637
+
638
+ hidden_state = stage_module(hidden_state)
639
+
640
+ if output_hidden_states:
641
+ hidden_states = hidden_states + (hidden_state,)
642
+
643
+ if not return_dict:
644
+ return tuple(v for v in [hidden_state, hidden_states] if v is not None)
645
+
646
+ return BaseModelOutputWithNoAttention(
647
+ last_hidden_state=hidden_state,
648
+ hidden_states=hidden_states,
649
+ )
650
+
651
+
652
+ class BitPreTrainedModel(PreTrainedModel):
653
+ """
654
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
655
+ models.
656
+ """
657
+
658
+ config_class = BitConfig
659
+ base_model_prefix = "bit"
660
+ main_input_name = "pixel_values"
661
+
662
+ def _init_weights(self, module):
663
+ if isinstance(module, nn.Conv2d):
664
+ nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
665
+ elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
666
+ nn.init.constant_(module.weight, 1)
667
+ nn.init.constant_(module.bias, 0)
668
+
669
+
670
+ BIT_START_DOCSTRING = r"""
671
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
672
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
673
+ behavior.
674
+
675
+ Parameters:
676
+ config ([`BitConfig`]): Model configuration class with all the parameters of the model.
677
+ Initializing with a config file does not load the weights associated with the model, only the
678
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
679
+ """
680
+
681
+ BIT_INPUTS_DOCSTRING = r"""
682
+ Args:
683
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
684
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`BitImageProcessor.__call__`]
685
+ for details.
686
+
687
+ output_hidden_states (`bool`, *optional*):
688
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
689
+ more detail.
690
+ return_dict (`bool`, *optional*):
691
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
692
+ """
693
+
694
+
695
+ @add_start_docstrings(
696
+ "The bare BiT model outputting raw features without any specific head on top.",
697
+ BIT_START_DOCSTRING,
698
+ )
699
+ class BitModel(BitPreTrainedModel):
700
+ def __init__(self, config):
701
+ super().__init__(config)
702
+ self.config = config
703
+
704
+ self.embedder = BitEmbeddings(config)
705
+
706
+ self.encoder = BitEncoder(config)
707
+ self.norm = (
708
+ BitGroupNormActivation(config, num_channels=config.hidden_sizes[-1])
709
+ if config.layer_type == "preactivation"
710
+ else nn.Identity()
711
+ )
712
+
713
+ self.pooler = nn.AdaptiveAvgPool2d((1, 1))
714
+ # Initialize weights and apply final processing
715
+ self.post_init()
716
+
717
+ @add_start_docstrings_to_model_forward(BIT_INPUTS_DOCSTRING)
718
+ @add_code_sample_docstrings(
719
+ checkpoint=_CHECKPOINT_FOR_DOC,
720
+ output_type=BaseModelOutputWithPoolingAndNoAttention,
721
+ config_class=_CONFIG_FOR_DOC,
722
+ modality="vision",
723
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
724
+ )
725
+ def forward(
726
+ self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
727
+ ) -> BaseModelOutputWithPoolingAndNoAttention:
728
+ output_hidden_states = (
729
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
730
+ )
731
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
732
+
733
+ embedding_output = self.embedder(pixel_values)
734
+
735
+ encoder_outputs = self.encoder(
736
+ embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
737
+ )
738
+
739
+ last_hidden_state = encoder_outputs[0]
740
+
741
+ last_hidden_state = self.norm(last_hidden_state)
742
+
743
+ pooled_output = self.pooler(last_hidden_state)
744
+
745
+ if not return_dict:
746
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
747
+
748
+ return BaseModelOutputWithPoolingAndNoAttention(
749
+ last_hidden_state=last_hidden_state,
750
+ pooler_output=pooled_output,
751
+ hidden_states=encoder_outputs.hidden_states,
752
+ )
753
+
754
+
755
+ @add_start_docstrings(
756
+ """
757
+ BiT Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
758
+ ImageNet.
759
+ """,
760
+ BIT_START_DOCSTRING,
761
+ )
762
+ class BitForImageClassification(BitPreTrainedModel):
763
+ def __init__(self, config):
764
+ super().__init__(config)
765
+ self.num_labels = config.num_labels
766
+ self.bit = BitModel(config)
767
+ # classification head
768
+ self.classifier = nn.Sequential(
769
+ nn.Flatten(),
770
+ nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
771
+ )
772
+ # initialize weights and apply final processing
773
+ self.post_init()
774
+
775
+ @add_start_docstrings_to_model_forward(BIT_INPUTS_DOCSTRING)
776
+ @add_code_sample_docstrings(
777
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
778
+ output_type=ImageClassifierOutputWithNoAttention,
779
+ config_class=_CONFIG_FOR_DOC,
780
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
781
+ )
782
+ def forward(
783
+ self,
784
+ pixel_values: Optional[torch.FloatTensor] = None,
785
+ labels: Optional[torch.LongTensor] = None,
786
+ output_hidden_states: Optional[bool] = None,
787
+ return_dict: Optional[bool] = None,
788
+ ) -> ImageClassifierOutputWithNoAttention:
789
+ r"""
790
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
791
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
792
+ config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
793
+ """
794
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
795
+
796
+ outputs = self.bit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
797
+
798
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
799
+
800
+ logits = self.classifier(pooled_output)
801
+
802
+ loss = None
803
+
804
+ if labels is not None:
805
+ if self.config.problem_type is None:
806
+ if self.num_labels == 1:
807
+ self.config.problem_type = "regression"
808
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
809
+ self.config.problem_type = "single_label_classification"
810
+ else:
811
+ self.config.problem_type = "multi_label_classification"
812
+ if self.config.problem_type == "regression":
813
+ loss_fct = MSELoss()
814
+ if self.num_labels == 1:
815
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
816
+ else:
817
+ loss = loss_fct(logits, labels)
818
+ elif self.config.problem_type == "single_label_classification":
819
+ loss_fct = CrossEntropyLoss()
820
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
821
+ elif self.config.problem_type == "multi_label_classification":
822
+ loss_fct = BCEWithLogitsLoss()
823
+ loss = loss_fct(logits, labels)
824
+
825
+ if not return_dict:
826
+ output = (logits,) + outputs[2:]
827
+ return (loss,) + output if loss is not None else output
828
+
829
+ return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
830
+
831
+
832
+ @add_start_docstrings(
833
+ """
834
+ BiT backbone, to be used with frameworks like DETR and MaskFormer.
835
+ """,
836
+ BIT_START_DOCSTRING,
837
+ )
838
+ class BitBackbone(BitPreTrainedModel, BackboneMixin):
839
+ def __init__(self, config):
840
+ super().__init__(config)
841
+ super()._init_backbone(config)
842
+
843
+ self.bit = BitModel(config)
844
+ self.num_features = [config.embedding_size] + config.hidden_sizes
845
+
846
+ # initialize weights and apply final processing
847
+ self.post_init()
848
+
849
+ @add_start_docstrings_to_model_forward(BIT_INPUTS_DOCSTRING)
850
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
851
+ def forward(
852
+ self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
853
+ ) -> BackboneOutput:
854
+ """
855
+ Returns:
856
+
857
+ Examples:
858
+
859
+ ```python
860
+ >>> from transformers import AutoImageProcessor, AutoBackbone
861
+ >>> import torch
862
+ >>> from PIL import Image
863
+ >>> import requests
864
+
865
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
866
+ >>> image = Image.open(requests.get(url, stream=True).raw)
867
+
868
+ >>> processor = AutoImageProcessor.from_pretrained("google/bit-50")
869
+ >>> model = AutoBackbone.from_pretrained("google/bit-50")
870
+
871
+ >>> inputs = processor(image, return_tensors="pt")
872
+ >>> outputs = model(**inputs)
873
+ ```"""
874
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
875
+ output_hidden_states = (
876
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
877
+ )
878
+
879
+ outputs = self.bit(pixel_values, output_hidden_states=True, return_dict=True)
880
+
881
+ hidden_states = outputs.hidden_states
882
+
883
+ feature_maps = ()
884
+ for idx, stage in enumerate(self.stage_names):
885
+ if stage in self.out_features:
886
+ feature_maps += (hidden_states[idx],)
887
+
888
+ if not return_dict:
889
+ output = (feature_maps,)
890
+ if output_hidden_states:
891
+ output += (outputs.hidden_states,)
892
+ return output
893
+
894
+ return BackboneOutput(
895
+ feature_maps=feature_maps,
896
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
897
+ attentions=None,
898
+ )
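
End of `modeling_bit.py`. As a hedged editorial sketch (not part of the committed diff), this is roughly how the `BitForImageClassification` class added above is exercised end to end; it assumes the public `google/bit-50` checkpoint and network access, neither of which this commit guarantees:

```python
# Minimal sanity-check sketch for the BiT classes above (assumes the
# public `google/bit-50` checkpoint is reachable on the Hub).
import requests
import torch
from PIL import Image

from transformers import AutoImageProcessor, BitForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("google/bit-50")
model = BitForImageClassification.from_pretrained("google/bit-50")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (batch_size, num_labels)

print(model.config.id2label[logits.argmax(-1).item()])
```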
llmeval-env/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.15 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/configuration_camembert.cpython-310.pyc ADDED
Binary file (6.7 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/modeling_camembert.cpython-310.pyc ADDED
Binary file (45 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/modeling_tf_camembert.cpython-310.pyc ADDED
Binary file (51.1 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/tokenization_camembert.cpython-310.pyc ADDED
Binary file (11.8 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/camembert/__pycache__/tokenization_camembert_fast.cpython-310.pyc ADDED
Binary file (7.39 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/depth_anything/__init__.py ADDED
@@ -0,0 +1,56 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...file_utils import _LazyModule, is_torch_available
17
+ from ...utils import OptionalDependencyNotAvailable
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_depth_anything": ["DEPTH_ANYTHING_PRETRAINED_CONFIG_ARCHIVE_MAP", "DepthAnythingConfig"]
22
+ }
23
+
24
+ try:
25
+ if not is_torch_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["modeling_depth_anything"] = [
31
+ "DEPTH_ANYTHING_PRETRAINED_MODEL_ARCHIVE_LIST",
32
+ "DepthAnythingForDepthEstimation",
33
+ "DepthAnythingPreTrainedModel",
34
+ ]
35
+
36
+
37
+ if TYPE_CHECKING:
38
+ from .configuration_depth_anything import DEPTH_ANYTHING_PRETRAINED_CONFIG_ARCHIVE_MAP, DepthAnythingConfig
39
+
40
+ try:
41
+ if not is_torch_available():
42
+ raise OptionalDependencyNotAvailable()
43
+ except OptionalDependencyNotAvailable:
44
+ pass
45
+ else:
46
+ from .modeling_depth_anything import (
47
+ DEPTH_ANYTHING_PRETRAINED_MODEL_ARCHIVE_LIST,
48
+ DepthAnythingForDepthEstimation,
49
+ DepthAnythingPreTrainedModel,
50
+ )
51
+
52
+
53
+ else:
54
+ import sys
55
+
56
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
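
The `_LazyModule` registration above defers the heavy imports until first attribute access. A hedged sketch of the import paths it exposes (the modeling classes sit behind the `is_torch_available()` guard, so PyTorch must be installed):

```python
# These names resolve lazily through the module defined above; the model
# class is only importable when torch is available.
from transformers import DepthAnythingConfig, DepthAnythingForDepthEstimation

config = DepthAnythingConfig()                   # default Dinov2-small backbone config
model = DepthAnythingForDepthEstimation(config)  # randomly initialized weights
```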
llmeval-env/lib/python3.10/site-packages/transformers/models/depth_anything/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (985 Bytes).
llmeval-env/lib/python3.10/site-packages/transformers/models/depth_anything/__pycache__/configuration_depth_anything.cpython-310.pyc ADDED
Binary file (5.63 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/depth_anything/__pycache__/convert_depth_anything_to_hf.cpython-310.pyc ADDED
Binary file (9.09 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/depth_anything/__pycache__/modeling_depth_anything.cpython-310.pyc ADDED
Binary file (15.2 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/depth_anything/configuration_depth_anything.py ADDED
@@ -0,0 +1,145 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ DepthAnything model configuration"""
16
+
17
+ import copy
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...utils import logging
21
+ from ..auto.configuration_auto import CONFIG_MAPPING
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import DEPTH_ANYTHING_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class DepthAnythingConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`DepthAnythingModel`]. It is used to instantiate a DepthAnything
33
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
34
+ defaults will yield a similar configuration to that of the DepthAnything
35
+ [LiheYoung/depth-anything-small-hf](https://huggingface.co/LiheYoung/depth-anything-small-hf) architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+ Args:
41
+ backbone_config (`Union[Dict[str, Any], PretrainedConfig]`, *optional*):
42
+ The configuration of the backbone model. Only used in case `is_hybrid` is `True` or in case you want to
43
+ leverage the [`AutoBackbone`] API.
44
+ backbone (`str`, *optional*):
45
+ Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
46
+ will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
47
+ is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
48
+ use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
49
+ Whether to use pretrained weights for the backbone.
50
+ patch_size (`int`, *optional*, defaults to 14):
51
+ The size of the patches to extract from the backbone features.
52
+ initializer_range (`float`, *optional*, defaults to 0.02):
53
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
54
+ reassemble_hidden_size (`int`, *optional*, defaults to 384):
55
+ The number of input channels of the reassemble layers.
56
+ reassemble_factors (`List[int]`, *optional*, defaults to `[4, 2, 1, 0.5]`):
57
+ The up/downsampling factors of the reassemble layers.
58
+ neck_hidden_sizes (`List[int]`, *optional*, defaults to `[48, 96, 192, 384]`):
59
+ The hidden sizes to project to for the feature maps of the backbone.
60
+ fusion_hidden_size (`int`, *optional*, defaults to 64):
61
+ The number of channels before fusion.
62
+ head_in_index (`int`, *optional*, defaults to -1):
63
+ The index of the features to use in the depth estimation head.
64
+ head_hidden_size (`int`, *optional*, defaults to 32):
65
+ The number of output channels in the second convolution of the depth estimation head.
66
+
67
+ Example:
68
+
69
+ ```python
70
+ >>> from transformers import DepthAnythingConfig, DepthAnythingForDepthEstimation
71
+
72
+ >>> # Initializing a DepthAnything small style configuration
73
+ >>> configuration = DepthAnythingConfig()
74
+
75
+ >>> # Initializing a model from the DepthAnything small style configuration
76
+ >>> model = DepthAnythingForDepthEstimation(configuration)
77
+
78
+ >>> # Accessing the model configuration
79
+ >>> configuration = model.config
80
+ ```"""
81
+
82
+ model_type = "depth_anything"
83
+
84
+ def __init__(
85
+ self,
86
+ backbone_config=None,
87
+ backbone=None,
88
+ use_pretrained_backbone=False,
89
+ patch_size=14,
90
+ initializer_range=0.02,
91
+ reassemble_hidden_size=384,
92
+ reassemble_factors=[4, 2, 1, 0.5],
93
+ neck_hidden_sizes=[48, 96, 192, 384],
94
+ fusion_hidden_size=64,
95
+ head_in_index=-1,
96
+ head_hidden_size=32,
97
+ **kwargs,
98
+ ):
99
+ super().__init__(**kwargs)
100
+
101
+ if use_pretrained_backbone:
102
+ raise ValueError("Pretrained backbones are not supported yet.")
103
+
104
+ if backbone_config is not None and backbone is not None:
105
+ raise ValueError("You can't specify both `backbone` and `backbone_config`.")
106
+
107
+ if backbone_config is None and backbone is None:
108
+ logger.info("`backbone_config` is `None`. Initializing the config with the default `Dinov2` backbone.")
109
+ backbone_config = CONFIG_MAPPING["dinov2"](
110
+ image_size=518,
111
+ hidden_size=384,
112
+ num_attention_heads=6,
113
+ out_indices=[9, 10, 11, 12],
114
+ apply_layernorm=True,
115
+ reshape_hidden_states=False,
116
+ )
117
+ elif isinstance(backbone_config, dict):
118
+ backbone_model_type = backbone_config.get("model_type")
119
+ config_class = CONFIG_MAPPING[backbone_model_type]
120
+ backbone_config = config_class.from_dict(backbone_config)
121
+
122
+ self.backbone_config = backbone_config
123
+ self.backbone = backbone
124
+ self.use_pretrained_backbone = use_pretrained_backbone
125
+ self.reassemble_hidden_size = reassemble_hidden_size
126
+ self.patch_size = patch_size
127
+ self.initializer_range = initializer_range
128
+ self.reassemble_factors = reassemble_factors
129
+ self.neck_hidden_sizes = neck_hidden_sizes
130
+ self.fusion_hidden_size = fusion_hidden_size
131
+ self.head_in_index = head_in_index
132
+ self.head_hidden_size = head_hidden_size
133
+
134
+ def to_dict(self):
135
+ """
136
+ Serializes this instance to a Python dictionary. Overrides the default [`~PretrainedConfig.to_dict`]. Returns:
137
+ `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
138
+ """
139
+ output = copy.deepcopy(self.__dict__)
140
+
141
+ if output["backbone_config"] is not None:
142
+ output["backbone_config"] = self.backbone_config.to_dict()
143
+
144
+ output["model_type"] = self.__class__.model_type
145
+ return output
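
Because the overridden `to_dict` above serializes the nested `backbone_config`, the configuration round-trips cleanly through a plain dictionary. A hedged sketch of that behavior (default values taken from the `__init__` shown earlier):

```python
# Round-trip sketch for DepthAnythingConfig as defined above.
from transformers import DepthAnythingConfig

config = DepthAnythingConfig()                    # falls back to the default Dinov2 backbone
as_dict = config.to_dict()                        # nested backbone_config is serialized as a dict
print(as_dict["model_type"])                      # "depth_anything"
print(as_dict["backbone_config"]["model_type"])   # "dinov2"

restored = DepthAnythingConfig.from_dict(as_dict)  # dict backbone_config is rebuilt in __init__
```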
llmeval-env/lib/python3.10/site-packages/transformers/models/depth_anything/convert_depth_anything_to_hf.py ADDED
@@ -0,0 +1,299 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Depth Anything checkpoints from the original repository. URL:
16
+ https://github.com/LiheYoung/Depth-Anything"""
17
+
18
+
19
+ import argparse
20
+ from pathlib import Path
21
+
22
+ import requests
23
+ import torch
24
+ from huggingface_hub import hf_hub_download
25
+ from PIL import Image
26
+
27
+ from transformers import DepthAnythingConfig, DepthAnythingForDepthEstimation, Dinov2Config, DPTImageProcessor
28
+ from transformers.utils import logging
29
+
30
+
31
+ logging.set_verbosity_info()
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ def get_dpt_config(model_name):
36
+ if "small" in model_name:
37
+ backbone_config = Dinov2Config.from_pretrained(
38
+ "facebook/dinov2-small", out_indices=[9, 10, 11, 12], apply_layernorm=True, reshape_hidden_states=False
39
+ )
40
+ fusion_hidden_size = 64
41
+ neck_hidden_sizes = [48, 96, 192, 384]
42
+ elif "base" in model_name:
43
+ backbone_config = Dinov2Config.from_pretrained(
44
+ "facebook/dinov2-base", out_indices=[9, 10, 11, 12], apply_layernorm=True, reshape_hidden_states=False
45
+ )
46
+ fusion_hidden_size = 128
47
+ neck_hidden_sizes = [96, 192, 384, 768]
48
+ elif "large" in model_name:
49
+ backbone_config = Dinov2Config.from_pretrained(
50
+ "facebook/dinov2-large", out_indices=[21, 22, 23, 24], apply_layernorm=True, reshape_hidden_states=False
51
+ )
52
+ fusion_hidden_size = 256
53
+ neck_hidden_sizes = [256, 512, 1024, 1024]
54
+ else:
55
+ raise NotImplementedError("To do")
56
+
57
+ config = DepthAnythingConfig(
58
+ reassemble_hidden_size=backbone_config.hidden_size,
59
+ patch_size=backbone_config.patch_size,
60
+ backbone_config=backbone_config,
61
+ fusion_hidden_size=fusion_hidden_size,
62
+ neck_hidden_sizes=neck_hidden_sizes,
63
+ )
64
+
65
+ return config
66
+
67
+
68
+ def create_rename_keys(config):
69
+ rename_keys = []
70
+
71
+ # fmt: off
72
+ # stem
73
+ rename_keys.append(("pretrained.cls_token", "backbone.embeddings.cls_token"))
74
+ rename_keys.append(("pretrained.mask_token", "backbone.embeddings.mask_token"))
75
+ rename_keys.append(("pretrained.pos_embed", "backbone.embeddings.position_embeddings"))
76
+ rename_keys.append(("pretrained.patch_embed.proj.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
77
+ rename_keys.append(("pretrained.patch_embed.proj.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
78
+
79
+ # Transformer encoder
80
+ for i in range(config.backbone_config.num_hidden_layers):
81
+ rename_keys.append((f"pretrained.blocks.{i}.ls1.gamma", f"backbone.encoder.layer.{i}.layer_scale1.lambda1"))
82
+ rename_keys.append((f"pretrained.blocks.{i}.ls2.gamma", f"backbone.encoder.layer.{i}.layer_scale2.lambda1"))
83
+ rename_keys.append((f"pretrained.blocks.{i}.norm1.weight", f"backbone.encoder.layer.{i}.norm1.weight"))
84
+ rename_keys.append((f"pretrained.blocks.{i}.norm1.bias", f"backbone.encoder.layer.{i}.norm1.bias"))
85
+ rename_keys.append((f"pretrained.blocks.{i}.norm2.weight", f"backbone.encoder.layer.{i}.norm2.weight"))
86
+ rename_keys.append((f"pretrained.blocks.{i}.norm2.bias", f"backbone.encoder.layer.{i}.norm2.bias"))
87
+ rename_keys.append((f"pretrained.blocks.{i}.mlp.fc1.weight", f"backbone.encoder.layer.{i}.mlp.fc1.weight"))
88
+ rename_keys.append((f"pretrained.blocks.{i}.mlp.fc1.bias", f"backbone.encoder.layer.{i}.mlp.fc1.bias"))
89
+ rename_keys.append((f"pretrained.blocks.{i}.mlp.fc2.weight", f"backbone.encoder.layer.{i}.mlp.fc2.weight"))
90
+ rename_keys.append((f"pretrained.blocks.{i}.mlp.fc2.bias", f"backbone.encoder.layer.{i}.mlp.fc2.bias"))
91
+ rename_keys.append((f"pretrained.blocks.{i}.attn.proj.weight", f"backbone.encoder.layer.{i}.attention.output.dense.weight"))
92
+ rename_keys.append((f"pretrained.blocks.{i}.attn.proj.bias", f"backbone.encoder.layer.{i}.attention.output.dense.bias"))
93
+
94
+ # Head
95
+ rename_keys.append(("pretrained.norm.weight", "backbone.layernorm.weight"))
96
+ rename_keys.append(("pretrained.norm.bias", "backbone.layernorm.bias"))
97
+
98
+ # activation postprocessing (readout projections + resize blocks)
99
+ # Depth Anything does not use CLS token => readout_projects not required
100
+
101
+ for i in range(4):
102
+ rename_keys.append((f"depth_head.projects.{i}.weight", f"neck.reassemble_stage.layers.{i}.projection.weight"))
103
+ rename_keys.append((f"depth_head.projects.{i}.bias", f"neck.reassemble_stage.layers.{i}.projection.bias"))
104
+
105
+ if i != 2:
106
+ rename_keys.append((f"depth_head.resize_layers.{i}.weight", f"neck.reassemble_stage.layers.{i}.resize.weight"))
107
+ rename_keys.append((f"depth_head.resize_layers.{i}.bias", f"neck.reassemble_stage.layers.{i}.resize.bias"))
108
+
109
+ # refinenet (tricky here)
110
+ mapping = {1:3, 2:2, 3:1, 4:0}
111
+
112
+ for i in range(1, 5):
113
+ j = mapping[i]
114
+ rename_keys.append((f"depth_head.scratch.refinenet{i}.out_conv.weight", f"neck.fusion_stage.layers.{j}.projection.weight"))
115
+ rename_keys.append((f"depth_head.scratch.refinenet{i}.out_conv.bias", f"neck.fusion_stage.layers.{j}.projection.bias"))
116
+ rename_keys.append((f"depth_head.scratch.refinenet{i}.resConfUnit1.conv1.weight", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution1.weight"))
117
+ rename_keys.append((f"depth_head.scratch.refinenet{i}.resConfUnit1.conv1.bias", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution1.bias"))
118
+ rename_keys.append((f"depth_head.scratch.refinenet{i}.resConfUnit1.conv2.weight", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution2.weight"))
119
+ rename_keys.append((f"depth_head.scratch.refinenet{i}.resConfUnit1.conv2.bias", f"neck.fusion_stage.layers.{j}.residual_layer1.convolution2.bias"))
120
+ rename_keys.append((f"depth_head.scratch.refinenet{i}.resConfUnit2.conv1.weight", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution1.weight"))
121
+ rename_keys.append((f"depth_head.scratch.refinenet{i}.resConfUnit2.conv1.bias", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution1.bias"))
122
+ rename_keys.append((f"depth_head.scratch.refinenet{i}.resConfUnit2.conv2.weight", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution2.weight"))
123
+ rename_keys.append((f"depth_head.scratch.refinenet{i}.resConfUnit2.conv2.bias", f"neck.fusion_stage.layers.{j}.residual_layer2.convolution2.bias"))
124
+
125
+ # scratch convolutions
126
+ for i in range(4):
127
+ rename_keys.append((f"depth_head.scratch.layer{i+1}_rn.weight", f"neck.convs.{i}.weight"))
128
+
129
+ # head
130
+ rename_keys.append(("depth_head.scratch.output_conv1.weight", "head.conv1.weight"))
131
+ rename_keys.append(("depth_head.scratch.output_conv1.bias", "head.conv1.bias"))
132
+ rename_keys.append(("depth_head.scratch.output_conv2.0.weight", "head.conv2.weight"))
133
+ rename_keys.append(("depth_head.scratch.output_conv2.0.bias", "head.conv2.bias"))
134
+ rename_keys.append(("depth_head.scratch.output_conv2.2.weight", "head.conv3.weight"))
135
+ rename_keys.append(("depth_head.scratch.output_conv2.2.bias", "head.conv3.bias"))
136
+
137
+ return rename_keys
138
+
139
+
140
+ # we split up the matrix of each encoder layer into queries, keys and values
141
+ def read_in_q_k_v(state_dict, config):
142
+ hidden_size = config.backbone_config.hidden_size
143
+ for i in range(config.backbone_config.num_hidden_layers):
144
+ # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
145
+ in_proj_weight = state_dict.pop(f"pretrained.blocks.{i}.attn.qkv.weight")
146
+ in_proj_bias = state_dict.pop(f"pretrained.blocks.{i}.attn.qkv.bias")
147
+ # next, add query, keys and values (in that order) to the state dict
148
+ state_dict[f"backbone.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[:hidden_size, :]
149
+ state_dict[f"backbone.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[:hidden_size]
150
+ state_dict[f"backbone.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
151
+ hidden_size : hidden_size * 2, :
152
+ ]
153
+ state_dict[f"backbone.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
154
+ hidden_size : hidden_size * 2
155
+ ]
156
+ state_dict[f"backbone.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-hidden_size:, :]
157
+ state_dict[f"backbone.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-hidden_size:]
158
+
159
+
160
+ def rename_key(dct, old, new):
161
+ val = dct.pop(old)
162
+ dct[new] = val
163
+
164
+
165
+ # We will verify our results on an image of cute cats
166
+ def prepare_img():
167
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
168
+ im = Image.open(requests.get(url, stream=True).raw)
169
+ return im
170
+
171
+
172
+ name_to_checkpoint = {
173
+ "depth-anything-small": "depth_anything_vits14.pth",
174
+ "depth-anything-base": "depth_anything_vitb14.pth",
175
+ "depth-anything-large": "depth_anything_vitl14.pth",
176
+ }
177
+
178
+
179
+ @torch.no_grad()
180
+ def convert_dpt_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub, verify_logits):
181
+ """
182
+ Copy/paste/tweak model's weights to our DPT structure.
183
+ """
184
+
185
+ # define DPT configuration
186
+ config = get_dpt_config(model_name)
187
+
188
+ model_name_to_filename = {
189
+ "depth-anything-small": "depth_anything_vits14.pth",
190
+ "depth-anything-base": "depth_anything_vitb14.pth",
191
+ "depth-anything-large": "depth_anything_vitl14.pth",
192
+ }
193
+
194
+ # load original state_dict
195
+ filename = model_name_to_filename[model_name]
196
+ filepath = hf_hub_download(
197
+ repo_id="LiheYoung/Depth-Anything", filename=f"checkpoints/{filename}", repo_type="space"
198
+ )
199
+ state_dict = torch.load(filepath, map_location="cpu")
200
+ # rename keys
201
+ rename_keys = create_rename_keys(config)
202
+ for src, dest in rename_keys:
203
+ rename_key(state_dict, src, dest)
204
+ # read in qkv matrices
205
+ read_in_q_k_v(state_dict, config)
206
+
207
+ # load HuggingFace model
208
+ model = DepthAnythingForDepthEstimation(config)
209
+ model.load_state_dict(state_dict)
210
+ model.eval()
211
+
212
+ processor = DPTImageProcessor(
213
+ do_resize=True,
214
+ size={"height": 518, "width": 518},
215
+ ensure_multiple_of=14,
216
+ keep_aspect_ratio=True,
217
+ do_rescale=True,
218
+ do_normalize=True,
219
+ image_mean=[0.485, 0.456, 0.406],
220
+ image_std=[0.229, 0.224, 0.225],
221
+ )
222
+
223
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
224
+ image = Image.open(requests.get(url, stream=True).raw)
225
+
226
+ pixel_values = processor(image, return_tensors="pt").pixel_values
227
+
228
+ # Verify forward pass
229
+ with torch.no_grad():
230
+ outputs = model(pixel_values)
231
+ predicted_depth = outputs.predicted_depth
232
+
233
+ print("Shape of predicted depth:", predicted_depth.shape)
234
+ print("First values:", predicted_depth[0, :3, :3])
235
+
236
+ # assert logits
237
+ if verify_logits:
238
+ expected_shape = torch.Size([1, 518, 686])
239
+ if model_name == "depth-anything-small":
240
+ expected_slice = torch.tensor(
241
+ [[8.8204, 8.6468, 8.6195], [8.3313, 8.6027, 8.7526], [8.6526, 8.6866, 8.7453]],
242
+ )
243
+ elif model_name == "depth-anything-base":
244
+ expected_slice = torch.tensor(
245
+ [[26.3997, 26.3004, 26.3928], [26.2260, 26.2092, 26.3427], [26.0719, 26.0483, 26.1254]],
246
+ )
247
+ elif model_name == "depth-anything-large":
248
+ expected_slice = torch.tensor(
249
+ [[87.9968, 87.7493, 88.2704], [87.1927, 87.6611, 87.3640], [86.7789, 86.9469, 86.7991]]
250
+ )
251
+ else:
252
+ raise ValueError("Not supported")
253
+
254
+ assert predicted_depth.shape == torch.Size(expected_shape)
255
+ assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-6)
256
+ print("Looks ok!")
257
+
258
+ if pytorch_dump_folder_path is not None:
259
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
260
+ print(f"Saving model and processor to {pytorch_dump_folder_path}")
261
+ model.save_pretrained(pytorch_dump_folder_path)
262
+ processor.save_pretrained(pytorch_dump_folder_path)
263
+
264
+ if push_to_hub:
265
+ print("Pushing model and processor to hub...")
266
+ model.push_to_hub(repo_id=f"LiheYoung/{model_name}-hf")
267
+ processor.push_to_hub(repo_id=f"LiheYoung/{model_name}-hf")
268
+
269
+
270
+ if __name__ == "__main__":
271
+ parser = argparse.ArgumentParser()
272
+ # Required parameters
273
+ parser.add_argument(
274
+ "--model_name",
275
+ default="depth-anything-small",
276
+ type=str,
277
+ choices=name_to_checkpoint.keys(),
278
+ help="Name of the model you'd like to convert.",
279
+ )
280
+ parser.add_argument(
281
+ "--pytorch_dump_folder_path",
282
+ default=None,
283
+ type=str,
284
+ help="Path to the output PyTorch model directory.",
285
+ )
286
+ parser.add_argument(
287
+ "--push_to_hub",
288
+ action="store_true",
289
+ help="Whether to push the model to the hub after conversion.",
290
+ )
291
+ parser.add_argument(
292
+ "--verify_logits",
293
+ action="store_false",
294
+ required=False,
295
+ help="Whether to verify the logits after conversion.",
296
+ )
297
+
298
+ args = parser.parse_args()
299
+ convert_dpt_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.verify_logits)
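
The converter above is normally run as a script (`python convert_depth_anything_to_hf.py --model_name depth-anything-small ...`). A hedged sketch of the equivalent programmatic call, assuming network access to the `LiheYoung/Depth-Anything` Space for the original checkpoint and treating the output directory name as purely illustrative:

```python
# Programmatic invocation of the conversion entry point defined above.
from transformers.models.depth_anything.convert_depth_anything_to_hf import convert_dpt_checkpoint

convert_dpt_checkpoint(
    model_name="depth-anything-small",
    pytorch_dump_folder_path="./depth-anything-small-hf",  # hypothetical output directory
    push_to_hub=False,
    verify_logits=True,
)
```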
llmeval-env/lib/python3.10/site-packages/transformers/models/depth_anything/modeling_depth_anything.py ADDED
@@ -0,0 +1,463 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 TikTok and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Depth Anything model."""
16
+
17
+
18
+ from typing import List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+
24
+ from ...file_utils import (
25
+ add_start_docstrings,
26
+ add_start_docstrings_to_model_forward,
27
+ replace_return_docstrings,
28
+ )
29
+ from ...modeling_outputs import DepthEstimatorOutput
30
+ from ...modeling_utils import PreTrainedModel
31
+ from ...utils import logging
32
+ from ..auto import AutoBackbone
33
+ from .configuration_depth_anything import DepthAnythingConfig
34
+
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+ # General docstring
39
+ _CONFIG_FOR_DOC = "DepthAnythingConfig"
40
+
41
+
42
+ from ..deprecated._archive_maps import DEPTH_ANYTHING_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
43
+
44
+
45
+ DEPTH_ANYTHING_START_DOCSTRING = r"""
46
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
47
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
48
+ behavior.
49
+
50
+ Parameters:
51
+ config ([`DepthAnythingConfig`]): Model configuration class with all the parameters of the model.
52
+ Initializing with a config file does not load the weights associated with the model, only the
53
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
54
+ """
55
+
56
+ DEPTH_ANYTHING_INPUTS_DOCSTRING = r"""
57
+ Args:
58
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
59
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`DPTImageProcessor.__call__`]
60
+ for details.
61
+
62
+ output_attentions (`bool`, *optional*):
63
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
64
+ tensors for more detail.
65
+ output_hidden_states (`bool`, *optional*):
66
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
67
+ more detail.
68
+ return_dict (`bool`, *optional*):
69
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
70
+ """
71
+
72
+
73
+ class DepthAnythingReassembleLayer(nn.Module):
74
+ def __init__(self, config, channels, factor):
75
+ super().__init__()
76
+ self.projection = nn.Conv2d(in_channels=config.reassemble_hidden_size, out_channels=channels, kernel_size=1)
77
+
78
+ # up/down sampling depending on factor
79
+ if factor > 1:
80
+ self.resize = nn.ConvTranspose2d(channels, channels, kernel_size=factor, stride=factor, padding=0)
81
+ elif factor == 1:
82
+ self.resize = nn.Identity()
83
+ elif factor < 1:
84
+ # so should downsample
85
+ self.resize = nn.Conv2d(channels, channels, kernel_size=3, stride=int(1 / factor), padding=1)
86
+
87
+ # Copied from transformers.models.dpt.modeling_dpt.DPTReassembleLayer.forward
88
+ def forward(self, hidden_state):
89
+ hidden_state = self.projection(hidden_state)
90
+ hidden_state = self.resize(hidden_state)
91
+
92
+ return hidden_state
93
+
94
+
95
+ class DepthAnythingReassembleStage(nn.Module):
96
+ """
97
+ This class reassembles the hidden states of the backbone into image-like feature representations at various
98
+ resolutions.
99
+
100
+ This happens in 3 stages:
101
+ 1. Take the patch embeddings and reshape them to image-like feature representations.
102
+ 2. Project the channel dimension of the hidden states according to `config.neck_hidden_sizes`.
103
+ 3. Resize the spatial dimensions (height, width).
104
+
105
+ Args:
106
+ config (`[DepthAnythingConfig]`):
107
+ Model configuration class defining the model architecture.
108
+ """
109
+
110
+ def __init__(self, config):
111
+ super().__init__()
112
+
113
+ self.config = config
114
+ self.layers = nn.ModuleList()
115
+ for channels, factor in zip(config.neck_hidden_sizes, config.reassemble_factors):
116
+ self.layers.append(DepthAnythingReassembleLayer(config, channels=channels, factor=factor))
117
+
118
+ def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]:
119
+ """
120
+ Args:
121
+ hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`):
122
+ List of hidden states from the backbone.
123
+ """
124
+ out = []
125
+
126
+ for i, hidden_state in enumerate(hidden_states):
127
+ # reshape to (batch_size, num_channels, height, width)
128
+ hidden_state = hidden_state[:, 1:]
129
+ batch_size, _, num_channels = hidden_state.shape
130
+ hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels)
131
+ hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
132
+ hidden_state = self.layers[i](hidden_state)
133
+ out.append(hidden_state)
134
+
135
+ return out
136
+
137
+
138
+ class DepthAnythingPreActResidualLayer(nn.Module):
139
+ """
140
+ ResidualConvUnit, pre-activate residual unit.
141
+
142
+ Args:
143
+ config (`[DepthAnythingConfig]`):
144
+ Model configuration class defining the model architecture.
145
+ """
146
+
147
+ def __init__(self, config):
148
+ super().__init__()
149
+
150
+ self.activation1 = nn.ReLU()
151
+ self.convolution1 = nn.Conv2d(
152
+ config.fusion_hidden_size,
153
+ config.fusion_hidden_size,
154
+ kernel_size=3,
155
+ stride=1,
156
+ padding=1,
157
+ bias=True,
158
+ )
159
+
160
+ self.activation2 = nn.ReLU()
161
+ self.convolution2 = nn.Conv2d(
162
+ config.fusion_hidden_size,
163
+ config.fusion_hidden_size,
164
+ kernel_size=3,
165
+ stride=1,
166
+ padding=1,
167
+ bias=True,
168
+ )
169
+
170
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
171
+ residual = hidden_state
172
+ hidden_state = self.activation1(hidden_state)
173
+ hidden_state = self.convolution1(hidden_state)
174
+ hidden_state = self.activation2(hidden_state)
175
+ hidden_state = self.convolution2(hidden_state)
176
+
177
+ return hidden_state + residual
178
+
179
+
180
+ class DepthAnythingFeatureFusionLayer(nn.Module):
181
+ """Feature fusion layer, merges feature maps from different stages.
182
+
183
+ Args:
184
+ config (`[DepthAnythingConfig]`):
185
+ Model configuration class defining the model architecture.
186
+ """
187
+
188
+ def __init__(self, config):
189
+ super().__init__()
190
+
191
+ self.projection = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=1, bias=True)
192
+
193
+ self.residual_layer1 = DepthAnythingPreActResidualLayer(config)
194
+ self.residual_layer2 = DepthAnythingPreActResidualLayer(config)
195
+
196
+ def forward(self, hidden_state, residual=None, size=None):
197
+ if residual is not None:
198
+ if hidden_state.shape != residual.shape:
199
+ residual = nn.functional.interpolate(
200
+ residual, size=(hidden_state.shape[2], hidden_state.shape[3]), mode="bilinear", align_corners=False
201
+ )
202
+ hidden_state = hidden_state + self.residual_layer1(residual)
203
+
204
+ hidden_state = self.residual_layer2(hidden_state)
205
+
206
+ modifier = {"scale_factor": 2} if size is None else {"size": size}
207
+
208
+ hidden_state = nn.functional.interpolate(
209
+ hidden_state,
210
+ **modifier,
211
+ mode="bilinear",
212
+ align_corners=True,
213
+ )
214
+ hidden_state = self.projection(hidden_state)
215
+
216
+ return hidden_state
217
+
218
+
219
+ class DepthAnythingFeatureFusionStage(nn.Module):
220
+ # Copied from transformers.models.dpt.modeling_dpt.DPTFeatureFusionStage.__init__ with DPT->DepthAnything
221
+ def __init__(self, config):
222
+ super().__init__()
223
+ self.layers = nn.ModuleList()
224
+ for _ in range(len(config.neck_hidden_sizes)):
225
+ self.layers.append(DepthAnythingFeatureFusionLayer(config))
226
+
227
+ def forward(self, hidden_states, size=None):
228
+ # reversing the hidden_states, we start from the last
229
+ hidden_states = hidden_states[::-1]
230
+
231
+ fused_hidden_states = []
232
+ # first layer only uses the last hidden_state
233
+ size = hidden_states[1].shape[2:]
234
+ fused_hidden_state = self.layers[0](hidden_states[0], size=size)
235
+ fused_hidden_states.append(fused_hidden_state)
236
+
237
+ # looping from the last layer to the second
238
+ for idx, (hidden_state, layer) in enumerate(zip(hidden_states[1:], self.layers[1:])):
239
+ size = hidden_states[1:][idx + 1].shape[2:] if idx != (len(hidden_states[1:]) - 1) else None
240
+
241
+ fused_hidden_state = layer(fused_hidden_state, hidden_state, size=size)
242
+
243
+ fused_hidden_states.append(fused_hidden_state)
244
+
245
+ return fused_hidden_states
246
+
247
+
248
+ # Copied from transformers.models.dpt.modeling_dpt.DPTPreTrainedModel with DPT->DepthAnything,dpt->depth_anything
249
+ class DepthAnythingPreTrainedModel(PreTrainedModel):
250
+ """
251
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
252
+ models.
253
+ """
254
+
255
+ config_class = DepthAnythingConfig
256
+ base_model_prefix = "depth_anything"
257
+ main_input_name = "pixel_values"
258
+ supports_gradient_checkpointing = True
259
+
260
+ def _init_weights(self, module):
261
+ """Initialize the weights"""
262
+ if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
263
+ # Slightly different from the TF version which uses truncated_normal for initialization
264
+ # cf https://github.com/pytorch/pytorch/pull/5617
265
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
266
+ if module.bias is not None:
267
+ module.bias.data.zero_()
268
+ elif isinstance(module, nn.LayerNorm):
269
+ module.bias.data.zero_()
270
+ module.weight.data.fill_(1.0)
271
+
272
+
273
+ class DepthAnythingNeck(nn.Module):
274
+ """
275
+ DepthAnythingNeck. A neck is a module that is normally used between the backbone and the head. It takes a list of tensors as
276
+ input and produces another list of tensors as output. For DepthAnything, it includes 2 stages:
277
+
278
+ * DepthAnythingReassembleStage
279
+ * DepthAnythingFeatureFusionStage.
280
+
281
+ Args:
282
+ config (dict): config dict.
283
+ """
284
+
285
+ def __init__(self, config):
286
+ super().__init__()
287
+ self.config = config
288
+
289
+ self.reassemble_stage = DepthAnythingReassembleStage(config)
290
+
291
+ self.convs = nn.ModuleList()
292
+ for channel in config.neck_hidden_sizes:
293
+ self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False))
294
+
295
+ # fusion
296
+ self.fusion_stage = DepthAnythingFeatureFusionStage(config)
297
+
298
+ def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]:
299
+ """
300
+ Args:
301
+             hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`):
+                 List of hidden states from the backbone.
+         """
+         if not isinstance(hidden_states, (tuple, list)):
+             raise ValueError("hidden_states should be a tuple or list of tensors")
+
+         if len(hidden_states) != len(self.config.neck_hidden_sizes):
+             raise ValueError("The number of hidden states should be equal to the number of neck hidden sizes.")
+
+         # postprocess hidden states
+         hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)
+
+         features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]
+
+         # fusion blocks
+         output = self.fusion_stage(features)
+
+         return output
+
+
+ class DepthAnythingDepthEstimationHead(nn.Module):
+     """
+     Output head consisting of 3 convolutional layers. It progressively halves the feature dimension and upsamples
+     the predictions to the input resolution after the first convolutional layer (details can be found in the DPT paper's
+     supplementary material).
+     """
+
+     def __init__(self, config):
+         super().__init__()
+
+         self.head_in_index = config.head_in_index
+         self.patch_size = config.patch_size
+
+         features = config.fusion_hidden_size
+         self.conv1 = nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1)
+         self.conv2 = nn.Conv2d(features // 2, config.head_hidden_size, kernel_size=3, stride=1, padding=1)
+         self.activation1 = nn.ReLU()
+         self.conv3 = nn.Conv2d(config.head_hidden_size, 1, kernel_size=1, stride=1, padding=0)
+         self.activation2 = nn.ReLU()
+
+     def forward(self, hidden_states: List[torch.Tensor], patch_height, patch_width) -> torch.Tensor:
+         hidden_states = hidden_states[self.head_in_index]
+
+         predicted_depth = self.conv1(hidden_states)
+         predicted_depth = nn.functional.interpolate(
+             predicted_depth,
+             (int(patch_height * self.patch_size), int(patch_width * self.patch_size)),
+             mode="bilinear",
+             align_corners=True,
+         )
+         predicted_depth = self.conv2(predicted_depth)
+         predicted_depth = self.activation1(predicted_depth)
+         predicted_depth = self.conv3(predicted_depth)
+         predicted_depth = self.activation2(predicted_depth)
+         predicted_depth = predicted_depth.squeeze(dim=1)  # shape (batch_size, height, width)
+
+         return predicted_depth
+
+
+ @add_start_docstrings(
+     """
+     Depth Anything Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2.
+     """,
+     DEPTH_ANYTHING_START_DOCSTRING,
+ )
+ class DepthAnythingForDepthEstimation(DepthAnythingPreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+
+         self.backbone = AutoBackbone.from_config(config.backbone_config)
+         self.neck = DepthAnythingNeck(config)
+         self.head = DepthAnythingDepthEstimationHead(config)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     @add_start_docstrings_to_model_forward(DEPTH_ANYTHING_INPUTS_DOCSTRING)
+     @replace_return_docstrings(output_type=DepthEstimatorOutput, config_class=_CONFIG_FOR_DOC)
+     def forward(
+         self,
+         pixel_values: torch.FloatTensor,
+         labels: Optional[torch.LongTensor] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple[torch.Tensor], DepthEstimatorOutput]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
+             Ground truth depth estimation maps for computing the loss.
+
+         Returns:
+
+         Examples:
+         ```python
+         >>> from transformers import AutoImageProcessor, AutoModelForDepthEstimation
+         >>> import torch
+         >>> import numpy as np
+         >>> from PIL import Image
+         >>> import requests
+
+         >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+         >>> image = Image.open(requests.get(url, stream=True).raw)
+
+         >>> image_processor = AutoImageProcessor.from_pretrained("LiheYoung/depth-anything-small-hf")
+         >>> model = AutoModelForDepthEstimation.from_pretrained("LiheYoung/depth-anything-small-hf")
+
+         >>> # prepare image for the model
+         >>> inputs = image_processor(images=image, return_tensors="pt")
+
+         >>> with torch.no_grad():
+         ...     outputs = model(**inputs)
+         ...     predicted_depth = outputs.predicted_depth
+
+         >>> # interpolate to original size
+         >>> prediction = torch.nn.functional.interpolate(
+         ...     predicted_depth.unsqueeze(1),
+         ...     size=image.size[::-1],
+         ...     mode="bicubic",
+         ...     align_corners=False,
+         ... )
+
+         >>> # visualize the prediction
+         >>> output = prediction.squeeze().cpu().numpy()
+         >>> formatted = (output * 255 / np.max(output)).astype("uint8")
+         >>> depth = Image.fromarray(formatted)
+         ```"""
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+
+         outputs = self.backbone.forward_with_filtered_kwargs(
+             pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
+         )
+         hidden_states = outputs.feature_maps
+
+         _, _, height, width = pixel_values.shape
+         patch_size = self.config.patch_size
+         patch_height = height // patch_size
+         patch_width = width // patch_size
+
+         hidden_states = self.neck(hidden_states, patch_height, patch_width)
+
+         predicted_depth = self.head(hidden_states, patch_height, patch_width)
+
+         loss = None
+         if labels is not None:
+             raise NotImplementedError("Training is not implemented yet")
+
+         if not return_dict:
+             if output_hidden_states:
+                 output = (predicted_depth,) + outputs[1:]
+             else:
+                 output = (predicted_depth,) + outputs[2:]
+             return ((loss,) + output) if loss is not None else output
+
+         return DepthEstimatorOutput(
+             loss=loss,
+             predicted_depth=predicted_depth,
+             hidden_states=outputs.hidden_states if output_hidden_states else None,
+             attentions=outputs.attentions,
+         )
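For orientation, here is a minimal sketch of the same inference flow as the docstring example above, but through the high-level `pipeline` API instead of the manual processor/model calls; it assumes the `depth-estimation` pipeline task and the `LiheYoung/depth-anything-small-hf` checkpoint referenced in that docstring are available in the installed `transformers` version.

```python
# Minimal sketch (not part of the file above): Depth Anything inference via the
# high-level pipeline API, assuming the "depth-estimation" task and the
# LiheYoung/depth-anything-small-hf checkpoint shown in the docstring example.
import requests
from PIL import Image
from transformers import pipeline

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

depth_estimator = pipeline("depth-estimation", model="LiheYoung/depth-anything-small-hf")
result = depth_estimator(image)

result["depth"].save("depth.png")       # PIL image rescaled for visualization
print(result["predicted_depth"].shape)  # raw depth tensor from DepthAnythingForDepthEstimation
```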
llmeval-env/lib/python3.10/site-packages/transformers/models/electra/__init__.py ADDED
@@ -0,0 +1,168 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_flax_available,
+     is_tf_available,
+     is_tokenizers_available,
+     is_torch_available,
+ )
+
+
+ _import_structure = {
+     "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
+     "tokenization_electra": ["ElectraTokenizer"],
+ }
+
+ try:
+     if not is_tokenizers_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_electra"] = [
+         "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "ElectraForCausalLM",
+         "ElectraForMaskedLM",
+         "ElectraForMultipleChoice",
+         "ElectraForPreTraining",
+         "ElectraForQuestionAnswering",
+         "ElectraForSequenceClassification",
+         "ElectraForTokenClassification",
+         "ElectraModel",
+         "ElectraPreTrainedModel",
+         "load_tf_weights_in_electra",
+     ]
+
+ try:
+     if not is_tf_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_tf_electra"] = [
+         "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "TFElectraForMaskedLM",
+         "TFElectraForMultipleChoice",
+         "TFElectraForPreTraining",
+         "TFElectraForQuestionAnswering",
+         "TFElectraForSequenceClassification",
+         "TFElectraForTokenClassification",
+         "TFElectraModel",
+         "TFElectraPreTrainedModel",
+     ]
+
+ try:
+     if not is_flax_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_flax_electra"] = [
+         "FlaxElectraForCausalLM",
+         "FlaxElectraForMaskedLM",
+         "FlaxElectraForMultipleChoice",
+         "FlaxElectraForPreTraining",
+         "FlaxElectraForQuestionAnswering",
+         "FlaxElectraForSequenceClassification",
+         "FlaxElectraForTokenClassification",
+         "FlaxElectraModel",
+         "FlaxElectraPreTrainedModel",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
+     from .tokenization_electra import ElectraTokenizer
+
+     try:
+         if not is_tokenizers_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_electra_fast import ElectraTokenizerFast
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_electra import (
+             ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
+             ElectraForCausalLM,
+             ElectraForMaskedLM,
+             ElectraForMultipleChoice,
+             ElectraForPreTraining,
+             ElectraForQuestionAnswering,
+             ElectraForSequenceClassification,
+             ElectraForTokenClassification,
+             ElectraModel,
+             ElectraPreTrainedModel,
+             load_tf_weights_in_electra,
+         )
+
+     try:
+         if not is_tf_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_tf_electra import (
+             TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
+             TFElectraForMaskedLM,
+             TFElectraForMultipleChoice,
+             TFElectraForPreTraining,
+             TFElectraForQuestionAnswering,
+             TFElectraForSequenceClassification,
+             TFElectraForTokenClassification,
+             TFElectraModel,
+             TFElectraPreTrainedModel,
+         )
+
+     try:
+         if not is_flax_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_flax_electra import (
+             FlaxElectraForCausalLM,
+             FlaxElectraForMaskedLM,
+             FlaxElectraForMultipleChoice,
+             FlaxElectraForPreTraining,
+             FlaxElectraForQuestionAnswering,
+             FlaxElectraForSequenceClassification,
+             FlaxElectraForTokenClassification,
+             FlaxElectraModel,
+             FlaxElectraPreTrainedModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
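As a short illustrative sketch of how the lazy `_import_structure` above behaves from user code (an assumption-labeled example, not part of the file): the heavy framework-specific submodules are only imported when one of their names is first accessed, provided a standard `transformers` install with PyTorch is available.

```python
# Illustrative sketch: names registered in _import_structure above are resolved
# lazily by _LazyModule, so modeling_electra is only imported on first access.
# Assumes a standard transformers install with PyTorch available.
from transformers import ElectraConfig, ElectraForPreTraining

config = ElectraConfig()                # defaults from configuration_electra.py below
model = ElectraForPreTraining(config)   # this access triggers the modeling_electra import
print(type(model).__name__)             # "ElectraForPreTraining"
```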
llmeval-env/lib/python3.10/site-packages/transformers/models/electra/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.55 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/electra/__pycache__/configuration_electra.cpython-310.pyc ADDED
Binary file (8.26 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/electra/__pycache__/convert_electra_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.87 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/electra/__pycache__/modeling_electra.cpython-310.pyc ADDED
Binary file (49.1 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/electra/__pycache__/modeling_flax_electra.cpython-310.pyc ADDED
Binary file (40.6 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/electra/__pycache__/modeling_tf_electra.cpython-310.pyc ADDED
Binary file (51.6 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/electra/__pycache__/tokenization_electra.cpython-310.pyc ADDED
Binary file (17 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/electra/__pycache__/tokenization_electra_fast.cpython-310.pyc ADDED
Binary file (6.68 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/electra/configuration_electra.py ADDED
@@ -0,0 +1,187 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ ELECTRA model configuration"""
17
+
18
+ from collections import OrderedDict
19
+ from typing import Mapping
20
+
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...onnx import OnnxConfig
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ from ..deprecated._archive_maps import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
30
+
31
+
32
+ class ElectraConfig(PretrainedConfig):
33
+ r"""
34
+ This is the configuration class to store the configuration of a [`ElectraModel`] or a [`TFElectraModel`]. It is
35
+ used to instantiate a ELECTRA model according to the specified arguments, defining the model architecture.
36
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the ELECTRA
37
+ [google/electra-small-discriminator](https://huggingface.co/google/electra-small-discriminator) architecture.
38
+
39
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
40
+ documentation from [`PretrainedConfig`] for more information.
41
+
42
+
43
+ Args:
44
+ vocab_size (`int`, *optional*, defaults to 30522):
45
+ Vocabulary size of the ELECTRA model. Defines the number of different tokens that can be represented by the
46
+ `inputs_ids` passed when calling [`ElectraModel`] or [`TFElectraModel`].
47
+ embedding_size (`int`, *optional*, defaults to 128):
48
+ Dimensionality of the encoder layers and the pooler layer.
49
+ hidden_size (`int`, *optional*, defaults to 256):
50
+ Dimensionality of the encoder layers and the pooler layer.
51
+ num_hidden_layers (`int`, *optional*, defaults to 12):
52
+ Number of hidden layers in the Transformer encoder.
53
+ num_attention_heads (`int`, *optional*, defaults to 4):
54
+ Number of attention heads for each attention layer in the Transformer encoder.
55
+ intermediate_size (`int`, *optional*, defaults to 1024):
56
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
57
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
58
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
59
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
60
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
61
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
62
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
63
+ The dropout ratio for the attention probabilities.
64
+ max_position_embeddings (`int`, *optional*, defaults to 512):
65
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
66
+ just in case (e.g., 512 or 1024 or 2048).
67
+ type_vocab_size (`int`, *optional*, defaults to 2):
68
+ The vocabulary size of the `token_type_ids` passed when calling [`ElectraModel`] or [`TFElectraModel`].
69
+ initializer_range (`float`, *optional*, defaults to 0.02):
70
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
71
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
72
+ The epsilon used by the layer normalization layers.
73
+ summary_type (`str`, *optional*, defaults to `"first"`):
74
+ Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
75
+
76
+ Has to be one of the following options:
77
+
78
+ - `"last"`: Take the last token hidden state (like XLNet).
79
+ - `"first"`: Take the first token hidden state (like BERT).
80
+ - `"mean"`: Take the mean of all tokens hidden states.
81
+ - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
82
+ - `"attn"`: Not implemented now, use multi-head attention.
83
+ summary_use_proj (`bool`, *optional*, defaults to `True`):
84
+ Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
85
+
86
+ Whether or not to add a projection after the vector extraction.
87
+ summary_activation (`str`, *optional*):
88
+ Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
89
+
90
+ Pass `"gelu"` for a gelu activation to the output, any other value will result in no activation.
91
+ summary_last_dropout (`float`, *optional*, defaults to 0.0):
92
+ Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
93
+
94
+ The dropout ratio to be used after the projection and activation.
95
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
96
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
97
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
98
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
99
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
100
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
101
+ use_cache (`bool`, *optional*, defaults to `True`):
102
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
103
+ relevant if `config.is_decoder=True`.
104
+ classifier_dropout (`float`, *optional*):
105
+ The dropout ratio for the classification head.
106
+
107
+ Examples:
108
+
109
+ ```python
110
+ >>> from transformers import ElectraConfig, ElectraModel
111
+
112
+ >>> # Initializing a ELECTRA electra-base-uncased style configuration
113
+ >>> configuration = ElectraConfig()
114
+
115
+ >>> # Initializing a model (with random weights) from the electra-base-uncased style configuration
116
+ >>> model = ElectraModel(configuration)
117
+
118
+ >>> # Accessing the model configuration
119
+ >>> configuration = model.config
120
+ ```"""
121
+
122
+ model_type = "electra"
123
+
124
+ def __init__(
125
+ self,
126
+ vocab_size=30522,
127
+ embedding_size=128,
128
+ hidden_size=256,
129
+ num_hidden_layers=12,
130
+ num_attention_heads=4,
131
+ intermediate_size=1024,
132
+ hidden_act="gelu",
133
+ hidden_dropout_prob=0.1,
134
+ attention_probs_dropout_prob=0.1,
135
+ max_position_embeddings=512,
136
+ type_vocab_size=2,
137
+ initializer_range=0.02,
138
+ layer_norm_eps=1e-12,
139
+ summary_type="first",
140
+ summary_use_proj=True,
141
+ summary_activation="gelu",
142
+ summary_last_dropout=0.1,
143
+ pad_token_id=0,
144
+ position_embedding_type="absolute",
145
+ use_cache=True,
146
+ classifier_dropout=None,
147
+ **kwargs,
148
+ ):
149
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
150
+
151
+ self.vocab_size = vocab_size
152
+ self.embedding_size = embedding_size
153
+ self.hidden_size = hidden_size
154
+ self.num_hidden_layers = num_hidden_layers
155
+ self.num_attention_heads = num_attention_heads
156
+ self.intermediate_size = intermediate_size
157
+ self.hidden_act = hidden_act
158
+ self.hidden_dropout_prob = hidden_dropout_prob
159
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
160
+ self.max_position_embeddings = max_position_embeddings
161
+ self.type_vocab_size = type_vocab_size
162
+ self.initializer_range = initializer_range
163
+ self.layer_norm_eps = layer_norm_eps
164
+
165
+ self.summary_type = summary_type
166
+ self.summary_use_proj = summary_use_proj
167
+ self.summary_activation = summary_activation
168
+ self.summary_last_dropout = summary_last_dropout
169
+ self.position_embedding_type = position_embedding_type
170
+ self.use_cache = use_cache
171
+ self.classifier_dropout = classifier_dropout
172
+
173
+
174
+ class ElectraOnnxConfig(OnnxConfig):
175
+ @property
176
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
177
+ if self.task == "multiple-choice":
178
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
179
+ else:
180
+ dynamic_axis = {0: "batch", 1: "sequence"}
181
+ return OrderedDict(
182
+ [
183
+ ("input_ids", dynamic_axis),
184
+ ("attention_mask", dynamic_axis),
185
+ ("token_type_ids", dynamic_axis),
186
+ ]
187
+ )
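For context, a minimal sketch of what the `ElectraOnnxConfig.inputs` property above declares; the chosen `task` value and the ability to instantiate the ONNX config without the full ONNX export stack installed are assumptions about the environment, and the printed mapping is only the expected shape of the result.

```python
# Minimal sketch (assumption: transformers.onnx is importable in this environment).
# It only inspects the dynamic-axes mapping declared by ElectraOnnxConfig.inputs above.
from transformers import ElectraConfig
from transformers.models.electra import ElectraOnnxConfig

onnx_config = ElectraOnnxConfig(ElectraConfig(), task="multiple-choice")
print(onnx_config.inputs)
# Expected along the lines of:
# OrderedDict([('input_ids', {0: 'batch', 1: 'choice', 2: 'sequence'}), ...])
```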
llmeval-env/lib/python3.10/site-packages/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,80 @@
+ # coding=utf-8
+ # Copyright 2018 The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Convert ELECTRA checkpoint."""
+
+
+ import argparse
+
+ import torch
+
+ from transformers import ElectraConfig, ElectraForMaskedLM, ElectraForPreTraining, load_tf_weights_in_electra
+ from transformers.utils import logging
+
+
+ logging.set_verbosity_info()
+
+
+ def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, discriminator_or_generator):
+     # Initialise PyTorch model
+     config = ElectraConfig.from_json_file(config_file)
+     print(f"Building PyTorch model from configuration: {config}")
+
+     if discriminator_or_generator == "discriminator":
+         model = ElectraForPreTraining(config)
+     elif discriminator_or_generator == "generator":
+         model = ElectraForMaskedLM(config)
+     else:
+         raise ValueError("The discriminator_or_generator argument should be either 'discriminator' or 'generator'")
+
+     # Load weights from tf checkpoint
+     load_tf_weights_in_electra(
+         model, config, tf_checkpoint_path, discriminator_or_generator=discriminator_or_generator
+     )
+
+     # Save pytorch-model
+     print(f"Save PyTorch model to {pytorch_dump_path}")
+     torch.save(model.state_dict(), pytorch_dump_path)
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     # Required parameters
+     parser.add_argument(
+         "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
+     )
+     parser.add_argument(
+         "--config_file",
+         default=None,
+         type=str,
+         required=True,
+         help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
+     )
+     parser.add_argument(
+         "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+     )
+     parser.add_argument(
+         "--discriminator_or_generator",
+         default=None,
+         type=str,
+         required=True,
+         help=(
+             "Whether to export the generator or the discriminator. Should be a string, either 'discriminator' or "
+             "'generator'."
+         ),
+     )
+     args = parser.parse_args()
+     convert_tf_checkpoint_to_pytorch(
+         args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.discriminator_or_generator
+     )
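A hypothetical invocation of the conversion helper defined above, calling the function directly instead of going through `argparse`; all three paths are placeholders, and TensorFlow must be installed for the TF checkpoint loading to work.

```python
# Hypothetical invocation of convert_tf_checkpoint_to_pytorch defined above.
# The checkpoint/config/output paths are placeholders, and TensorFlow must be
# installed for load_tf_weights_in_electra to succeed.
from transformers.models.electra.convert_electra_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="electra_small/model.ckpt",                # placeholder
    config_file="electra_small/config.json",                      # placeholder
    pytorch_dump_path="electra_small_pytorch/pytorch_model.bin",  # placeholder
    discriminator_or_generator="discriminator",
)
```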
llmeval-env/lib/python3.10/site-packages/transformers/models/electra/modeling_electra.py ADDED
@@ -0,0 +1,1679 @@
1
+ # coding=utf-8
2
+ # Copyright 2019 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch ELECTRA model."""
16
+
17
+ import math
18
+ import os
19
+ from dataclasses import dataclass
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN, get_activation
28
+ from ...modeling_outputs import (
29
+ BaseModelOutputWithCrossAttentions,
30
+ BaseModelOutputWithPastAndCrossAttentions,
31
+ CausalLMOutputWithCrossAttentions,
32
+ MaskedLMOutput,
33
+ MultipleChoiceModelOutput,
34
+ QuestionAnsweringModelOutput,
35
+ SequenceClassifierOutput,
36
+ TokenClassifierOutput,
37
+ )
38
+ from ...modeling_utils import PreTrainedModel, SequenceSummary
39
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
40
+ from ...utils import (
41
+ ModelOutput,
42
+ add_code_sample_docstrings,
43
+ add_start_docstrings,
44
+ add_start_docstrings_to_model_forward,
45
+ logging,
46
+ replace_return_docstrings,
47
+ )
48
+ from .configuration_electra import ElectraConfig
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ _CHECKPOINT_FOR_DOC = "google/electra-small-discriminator"
54
+ _CONFIG_FOR_DOC = "ElectraConfig"
55
+
56
+
57
+ from ..deprecated._archive_maps import ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
58
+
59
+
60
+ def load_tf_weights_in_electra(model, config, tf_checkpoint_path, discriminator_or_generator="discriminator"):
61
+ """Load tf checkpoints in a pytorch model."""
62
+ try:
63
+ import re
64
+
65
+ import numpy as np
66
+ import tensorflow as tf
67
+ except ImportError:
68
+ logger.error(
69
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
70
+ "https://www.tensorflow.org/install/ for installation instructions."
71
+ )
72
+ raise
73
+ tf_path = os.path.abspath(tf_checkpoint_path)
74
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
75
+ # Load weights from TF model
76
+ init_vars = tf.train.list_variables(tf_path)
77
+ names = []
78
+ arrays = []
79
+ for name, shape in init_vars:
80
+ logger.info(f"Loading TF weight {name} with shape {shape}")
81
+ array = tf.train.load_variable(tf_path, name)
82
+ names.append(name)
83
+ arrays.append(array)
84
+ for name, array in zip(names, arrays):
85
+ original_name: str = name
86
+
87
+ try:
88
+ if isinstance(model, ElectraForMaskedLM):
89
+ name = name.replace("electra/embeddings/", "generator/embeddings/")
90
+
91
+ if discriminator_or_generator == "generator":
92
+ name = name.replace("electra/", "discriminator/")
93
+ name = name.replace("generator/", "electra/")
94
+
95
+ name = name.replace("dense_1", "dense_prediction")
96
+ name = name.replace("generator_predictions/output_bias", "generator_lm_head/bias")
97
+
98
+ name = name.split("/")
99
+ # print(original_name, name)
100
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
101
+ # which are not required for using pretrained model
102
+ if any(n in ["global_step", "temperature"] for n in name):
103
+ logger.info(f"Skipping {original_name}")
104
+ continue
105
+ pointer = model
106
+ for m_name in name:
107
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
108
+ scope_names = re.split(r"_(\d+)", m_name)
109
+ else:
110
+ scope_names = [m_name]
111
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
112
+ pointer = getattr(pointer, "weight")
113
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
114
+ pointer = getattr(pointer, "bias")
115
+ elif scope_names[0] == "output_weights":
116
+ pointer = getattr(pointer, "weight")
117
+ elif scope_names[0] == "squad":
118
+ pointer = getattr(pointer, "classifier")
119
+ else:
120
+ pointer = getattr(pointer, scope_names[0])
121
+ if len(scope_names) >= 2:
122
+ num = int(scope_names[1])
123
+ pointer = pointer[num]
124
+ if m_name.endswith("_embeddings"):
125
+ pointer = getattr(pointer, "weight")
126
+ elif m_name == "kernel":
127
+ array = np.transpose(array)
128
+ try:
129
+ if pointer.shape != array.shape:
130
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
131
+ except ValueError as e:
132
+ e.args += (pointer.shape, array.shape)
133
+ raise
134
+ print(f"Initialize PyTorch weight {name}", original_name)
135
+ pointer.data = torch.from_numpy(array)
136
+ except AttributeError as e:
137
+ print(f"Skipping {original_name}", name, e)
138
+ continue
139
+ return model
140
+
141
+
142
+ class ElectraEmbeddings(nn.Module):
143
+ """Construct the embeddings from word, position and token_type embeddings."""
144
+
145
+ def __init__(self, config):
146
+ super().__init__()
147
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
148
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
149
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
150
+
151
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
152
+ # any TensorFlow checkpoint file
153
+ self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
154
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
155
+
156
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
157
+ self.register_buffer(
158
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
159
+ )
160
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
161
+ self.register_buffer(
162
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
163
+ )
164
+
165
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward
166
+ def forward(
167
+ self,
168
+ input_ids: Optional[torch.LongTensor] = None,
169
+ token_type_ids: Optional[torch.LongTensor] = None,
170
+ position_ids: Optional[torch.LongTensor] = None,
171
+ inputs_embeds: Optional[torch.FloatTensor] = None,
172
+ past_key_values_length: int = 0,
173
+ ) -> torch.Tensor:
174
+ if input_ids is not None:
175
+ input_shape = input_ids.size()
176
+ else:
177
+ input_shape = inputs_embeds.size()[:-1]
178
+
179
+ seq_length = input_shape[1]
180
+
181
+ if position_ids is None:
182
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
183
+
184
+ # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
185
+ # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
186
+ # issue #5664
187
+ if token_type_ids is None:
188
+ if hasattr(self, "token_type_ids"):
189
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
190
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
191
+ token_type_ids = buffered_token_type_ids_expanded
192
+ else:
193
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
194
+
195
+ if inputs_embeds is None:
196
+ inputs_embeds = self.word_embeddings(input_ids)
197
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
198
+
199
+ embeddings = inputs_embeds + token_type_embeddings
200
+ if self.position_embedding_type == "absolute":
201
+ position_embeddings = self.position_embeddings(position_ids)
202
+ embeddings += position_embeddings
203
+ embeddings = self.LayerNorm(embeddings)
204
+ embeddings = self.dropout(embeddings)
205
+ return embeddings
206
+
207
+
208
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Electra
209
+ class ElectraSelfAttention(nn.Module):
210
+ def __init__(self, config, position_embedding_type=None):
211
+ super().__init__()
212
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
213
+ raise ValueError(
214
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
215
+ f"heads ({config.num_attention_heads})"
216
+ )
217
+
218
+ self.num_attention_heads = config.num_attention_heads
219
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
220
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
221
+
222
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
223
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
224
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
225
+
226
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
227
+ self.position_embedding_type = position_embedding_type or getattr(
228
+ config, "position_embedding_type", "absolute"
229
+ )
230
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
231
+ self.max_position_embeddings = config.max_position_embeddings
232
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
233
+
234
+ self.is_decoder = config.is_decoder
235
+
236
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
237
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
238
+ x = x.view(new_x_shape)
239
+ return x.permute(0, 2, 1, 3)
240
+
241
+ def forward(
242
+ self,
243
+ hidden_states: torch.Tensor,
244
+ attention_mask: Optional[torch.FloatTensor] = None,
245
+ head_mask: Optional[torch.FloatTensor] = None,
246
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
247
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
248
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
249
+ output_attentions: Optional[bool] = False,
250
+ ) -> Tuple[torch.Tensor]:
251
+ mixed_query_layer = self.query(hidden_states)
252
+
253
+ # If this is instantiated as a cross-attention module, the keys
254
+ # and values come from an encoder; the attention mask needs to be
255
+ # such that the encoder's padding tokens are not attended to.
256
+ is_cross_attention = encoder_hidden_states is not None
257
+
258
+ if is_cross_attention and past_key_value is not None:
259
+ # reuse k,v, cross_attentions
260
+ key_layer = past_key_value[0]
261
+ value_layer = past_key_value[1]
262
+ attention_mask = encoder_attention_mask
263
+ elif is_cross_attention:
264
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
265
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
266
+ attention_mask = encoder_attention_mask
267
+ elif past_key_value is not None:
268
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
269
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
270
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
271
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
272
+ else:
273
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
274
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
275
+
276
+ query_layer = self.transpose_for_scores(mixed_query_layer)
277
+
278
+ use_cache = past_key_value is not None
279
+ if self.is_decoder:
280
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
281
+ # Further calls to cross_attention layer can then reuse all cross-attention
282
+ # key/value_states (first "if" case)
283
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
284
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
285
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
286
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
287
+ past_key_value = (key_layer, value_layer)
288
+
289
+ # Take the dot product between "query" and "key" to get the raw attention scores.
290
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
291
+
292
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
293
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
294
+ if use_cache:
295
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
296
+ -1, 1
297
+ )
298
+ else:
299
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
300
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
301
+ distance = position_ids_l - position_ids_r
302
+
303
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
304
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
305
+
306
+ if self.position_embedding_type == "relative_key":
307
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
308
+ attention_scores = attention_scores + relative_position_scores
309
+ elif self.position_embedding_type == "relative_key_query":
310
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
311
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
312
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
313
+
314
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
315
+ if attention_mask is not None:
316
+ # Apply the attention mask is (precomputed for all layers in ElectraModel forward() function)
317
+ attention_scores = attention_scores + attention_mask
318
+
319
+ # Normalize the attention scores to probabilities.
320
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
321
+
322
+ # This is actually dropping out entire tokens to attend to, which might
323
+ # seem a bit unusual, but is taken from the original Transformer paper.
324
+ attention_probs = self.dropout(attention_probs)
325
+
326
+ # Mask heads if we want to
327
+ if head_mask is not None:
328
+ attention_probs = attention_probs * head_mask
329
+
330
+ context_layer = torch.matmul(attention_probs, value_layer)
331
+
332
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
333
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
334
+ context_layer = context_layer.view(new_context_layer_shape)
335
+
336
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
337
+
338
+ if self.is_decoder:
339
+ outputs = outputs + (past_key_value,)
340
+ return outputs
341
+
342
+
343
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput
344
+ class ElectraSelfOutput(nn.Module):
345
+ def __init__(self, config):
346
+ super().__init__()
347
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
348
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
349
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
350
+
351
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
352
+ hidden_states = self.dense(hidden_states)
353
+ hidden_states = self.dropout(hidden_states)
354
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
355
+ return hidden_states
356
+
357
+
358
+ # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Electra
359
+ class ElectraAttention(nn.Module):
360
+ def __init__(self, config, position_embedding_type=None):
361
+ super().__init__()
362
+ self.self = ElectraSelfAttention(config, position_embedding_type=position_embedding_type)
363
+ self.output = ElectraSelfOutput(config)
364
+ self.pruned_heads = set()
365
+
366
+ def prune_heads(self, heads):
367
+ if len(heads) == 0:
368
+ return
369
+ heads, index = find_pruneable_heads_and_indices(
370
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
371
+ )
372
+
373
+ # Prune linear layers
374
+ self.self.query = prune_linear_layer(self.self.query, index)
375
+ self.self.key = prune_linear_layer(self.self.key, index)
376
+ self.self.value = prune_linear_layer(self.self.value, index)
377
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
378
+
379
+ # Update hyper params and store pruned heads
380
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
381
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
382
+ self.pruned_heads = self.pruned_heads.union(heads)
383
+
384
+ def forward(
385
+ self,
386
+ hidden_states: torch.Tensor,
387
+ attention_mask: Optional[torch.FloatTensor] = None,
388
+ head_mask: Optional[torch.FloatTensor] = None,
389
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
390
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
391
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
392
+ output_attentions: Optional[bool] = False,
393
+ ) -> Tuple[torch.Tensor]:
394
+ self_outputs = self.self(
395
+ hidden_states,
396
+ attention_mask,
397
+ head_mask,
398
+ encoder_hidden_states,
399
+ encoder_attention_mask,
400
+ past_key_value,
401
+ output_attentions,
402
+ )
403
+ attention_output = self.output(self_outputs[0], hidden_states)
404
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
405
+ return outputs
406
+
407
+
408
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate
409
+ class ElectraIntermediate(nn.Module):
410
+ def __init__(self, config):
411
+ super().__init__()
412
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
413
+ if isinstance(config.hidden_act, str):
414
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
415
+ else:
416
+ self.intermediate_act_fn = config.hidden_act
417
+
418
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
419
+ hidden_states = self.dense(hidden_states)
420
+ hidden_states = self.intermediate_act_fn(hidden_states)
421
+ return hidden_states
422
+
423
+
424
+ # Copied from transformers.models.bert.modeling_bert.BertOutput
425
+ class ElectraOutput(nn.Module):
426
+ def __init__(self, config):
427
+ super().__init__()
428
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
429
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
430
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
431
+
432
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
433
+ hidden_states = self.dense(hidden_states)
434
+ hidden_states = self.dropout(hidden_states)
435
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
436
+ return hidden_states
437
+
438
+
439
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Electra
440
+ class ElectraLayer(nn.Module):
441
+ def __init__(self, config):
442
+ super().__init__()
443
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
444
+ self.seq_len_dim = 1
445
+ self.attention = ElectraAttention(config)
446
+ self.is_decoder = config.is_decoder
447
+ self.add_cross_attention = config.add_cross_attention
448
+ if self.add_cross_attention:
449
+ if not self.is_decoder:
450
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
451
+ self.crossattention = ElectraAttention(config, position_embedding_type="absolute")
452
+ self.intermediate = ElectraIntermediate(config)
453
+ self.output = ElectraOutput(config)
454
+
455
+ def forward(
456
+ self,
457
+ hidden_states: torch.Tensor,
458
+ attention_mask: Optional[torch.FloatTensor] = None,
459
+ head_mask: Optional[torch.FloatTensor] = None,
460
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
461
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
462
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
463
+ output_attentions: Optional[bool] = False,
464
+ ) -> Tuple[torch.Tensor]:
465
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
466
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
467
+ self_attention_outputs = self.attention(
468
+ hidden_states,
469
+ attention_mask,
470
+ head_mask,
471
+ output_attentions=output_attentions,
472
+ past_key_value=self_attn_past_key_value,
473
+ )
474
+ attention_output = self_attention_outputs[0]
475
+
476
+ # if decoder, the last output is tuple of self-attn cache
477
+ if self.is_decoder:
478
+ outputs = self_attention_outputs[1:-1]
479
+ present_key_value = self_attention_outputs[-1]
480
+ else:
481
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
482
+
483
+ cross_attn_present_key_value = None
484
+ if self.is_decoder and encoder_hidden_states is not None:
485
+ if not hasattr(self, "crossattention"):
486
+ raise ValueError(
487
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
488
+ " by setting `config.add_cross_attention=True`"
489
+ )
490
+
491
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
492
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
493
+ cross_attention_outputs = self.crossattention(
494
+ attention_output,
495
+ attention_mask,
496
+ head_mask,
497
+ encoder_hidden_states,
498
+ encoder_attention_mask,
499
+ cross_attn_past_key_value,
500
+ output_attentions,
501
+ )
502
+ attention_output = cross_attention_outputs[0]
503
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
504
+
505
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
506
+ cross_attn_present_key_value = cross_attention_outputs[-1]
507
+ present_key_value = present_key_value + cross_attn_present_key_value
508
+
509
+ layer_output = apply_chunking_to_forward(
510
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
511
+ )
512
+ outputs = (layer_output,) + outputs
513
+
514
+ # if decoder, return the attn key/values as the last output
515
+ if self.is_decoder:
516
+ outputs = outputs + (present_key_value,)
517
+
518
+ return outputs
519
+
520
+ def feed_forward_chunk(self, attention_output):
521
+ intermediate_output = self.intermediate(attention_output)
522
+ layer_output = self.output(intermediate_output, attention_output)
523
+ return layer_output
524
+
525
+
526
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Electra
527
+ class ElectraEncoder(nn.Module):
528
+ def __init__(self, config):
529
+ super().__init__()
530
+ self.config = config
531
+ self.layer = nn.ModuleList([ElectraLayer(config) for _ in range(config.num_hidden_layers)])
532
+ self.gradient_checkpointing = False
533
+
534
+ def forward(
535
+ self,
536
+ hidden_states: torch.Tensor,
537
+ attention_mask: Optional[torch.FloatTensor] = None,
538
+ head_mask: Optional[torch.FloatTensor] = None,
539
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
540
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
541
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
542
+ use_cache: Optional[bool] = None,
543
+ output_attentions: Optional[bool] = False,
544
+ output_hidden_states: Optional[bool] = False,
545
+ return_dict: Optional[bool] = True,
546
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
547
+ all_hidden_states = () if output_hidden_states else None
548
+ all_self_attentions = () if output_attentions else None
549
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
550
+
551
+ if self.gradient_checkpointing and self.training:
552
+ if use_cache:
553
+ logger.warning_once(
554
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
555
+ )
556
+ use_cache = False
557
+
558
+ next_decoder_cache = () if use_cache else None
559
+ for i, layer_module in enumerate(self.layer):
560
+ if output_hidden_states:
561
+ all_hidden_states = all_hidden_states + (hidden_states,)
562
+
563
+ layer_head_mask = head_mask[i] if head_mask is not None else None
564
+ past_key_value = past_key_values[i] if past_key_values is not None else None
565
+
566
+ if self.gradient_checkpointing and self.training:
567
+ layer_outputs = self._gradient_checkpointing_func(
568
+ layer_module.__call__,
569
+ hidden_states,
570
+ attention_mask,
571
+ layer_head_mask,
572
+ encoder_hidden_states,
573
+ encoder_attention_mask,
574
+ past_key_value,
575
+ output_attentions,
576
+ )
577
+ else:
578
+ layer_outputs = layer_module(
579
+ hidden_states,
580
+ attention_mask,
581
+ layer_head_mask,
582
+ encoder_hidden_states,
583
+ encoder_attention_mask,
584
+ past_key_value,
585
+ output_attentions,
586
+ )
587
+
588
+ hidden_states = layer_outputs[0]
589
+ if use_cache:
590
+ next_decoder_cache += (layer_outputs[-1],)
591
+ if output_attentions:
592
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
593
+ if self.config.add_cross_attention:
594
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
595
+
596
+ if output_hidden_states:
597
+ all_hidden_states = all_hidden_states + (hidden_states,)
598
+
599
+ if not return_dict:
600
+ return tuple(
601
+ v
602
+ for v in [
603
+ hidden_states,
604
+ next_decoder_cache,
605
+ all_hidden_states,
606
+ all_self_attentions,
607
+ all_cross_attentions,
608
+ ]
609
+ if v is not None
610
+ )
611
+ return BaseModelOutputWithPastAndCrossAttentions(
612
+ last_hidden_state=hidden_states,
613
+ past_key_values=next_decoder_cache,
614
+ hidden_states=all_hidden_states,
615
+ attentions=all_self_attentions,
616
+ cross_attentions=all_cross_attentions,
617
+ )
618
+
619
+
620
+ class ElectraDiscriminatorPredictions(nn.Module):
621
+ """Prediction module for the discriminator, made up of two dense layers."""
622
+
623
+ def __init__(self, config):
624
+ super().__init__()
625
+
626
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
627
+ self.activation = get_activation(config.hidden_act)
628
+ self.dense_prediction = nn.Linear(config.hidden_size, 1)
629
+ self.config = config
630
+
631
+ def forward(self, discriminator_hidden_states):
632
+ hidden_states = self.dense(discriminator_hidden_states)
633
+ hidden_states = self.activation(hidden_states)
634
+ logits = self.dense_prediction(hidden_states).squeeze(-1)
635
+
636
+ return logits
637
+
638
+
639
+ class ElectraGeneratorPredictions(nn.Module):
640
+ """Prediction module for the generator, made up of two dense layers."""
641
+
642
+ def __init__(self, config):
643
+ super().__init__()
644
+
645
+ self.activation = get_activation("gelu")
646
+ self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
647
+ self.dense = nn.Linear(config.hidden_size, config.embedding_size)
648
+
649
+ def forward(self, generator_hidden_states):
650
+ hidden_states = self.dense(generator_hidden_states)
651
+ hidden_states = self.activation(hidden_states)
652
+ hidden_states = self.LayerNorm(hidden_states)
653
+
654
+ return hidden_states
655
+
656
+
657
+ class ElectraPreTrainedModel(PreTrainedModel):
658
+ """
659
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
660
+ models.
661
+ """
662
+
663
+ config_class = ElectraConfig
664
+ load_tf_weights = load_tf_weights_in_electra
665
+ base_model_prefix = "electra"
666
+ supports_gradient_checkpointing = True
667
+
668
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
669
+ def _init_weights(self, module):
670
+ """Initialize the weights"""
671
+ if isinstance(module, nn.Linear):
672
+ # Slightly different from the TF version which uses truncated_normal for initialization
673
+ # cf https://github.com/pytorch/pytorch/pull/5617
674
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
675
+ if module.bias is not None:
676
+ module.bias.data.zero_()
677
+ elif isinstance(module, nn.Embedding):
678
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
679
+ if module.padding_idx is not None:
680
+ module.weight.data[module.padding_idx].zero_()
681
+ elif isinstance(module, nn.LayerNorm):
682
+ module.bias.data.zero_()
683
+ module.weight.data.fill_(1.0)
684
+
685
+
686
+ @dataclass
687
+ class ElectraForPreTrainingOutput(ModelOutput):
688
+ """
689
+ Output type of [`ElectraForPreTraining`].
690
+
691
+ Args:
692
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
693
+ Total loss of the ELECTRA objective.
694
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
695
+ Prediction scores of the head (scores for each token before SoftMax).
696
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
697
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
698
+ shape `(batch_size, sequence_length, hidden_size)`.
699
+
700
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
701
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
702
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
703
+ sequence_length)`.
704
+
705
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
706
+ heads.
707
+ """
708
+
709
+ loss: Optional[torch.FloatTensor] = None
710
+ logits: torch.FloatTensor = None
711
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
712
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
713
+
714
+
715
+ ELECTRA_START_DOCSTRING = r"""
716
+
717
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
718
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
719
+ etc.)
720
+
721
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
722
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
723
+ and behavior.
724
+
725
+ Parameters:
726
+ config ([`ElectraConfig`]): Model configuration class with all the parameters of the model.
727
+ Initializing with a config file does not load the weights associated with the model, only the
728
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
729
+ """
730
+
731
+ ELECTRA_INPUTS_DOCSTRING = r"""
732
+ Args:
733
+ input_ids (`torch.LongTensor` of shape `({0})`):
734
+ Indices of input sequence tokens in the vocabulary.
735
+
736
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
737
+ [`PreTrainedTokenizer.__call__`] for details.
738
+
739
+ [What are input IDs?](../glossary#input-ids)
740
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
741
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
742
+
743
+ - 1 for tokens that are **not masked**,
744
+ - 0 for tokens that are **masked**.
745
+
746
+ [What are attention masks?](../glossary#attention-mask)
747
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
748
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
749
+ 1]`:
750
+
751
+ - 0 corresponds to a *sentence A* token,
752
+ - 1 corresponds to a *sentence B* token.
753
+
754
+ [What are token type IDs?](../glossary#token-type-ids)
755
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
756
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
757
+ config.max_position_embeddings - 1]`.
758
+
759
+ [What are position IDs?](../glossary#position-ids)
760
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
761
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
762
+
763
+ - 1 indicates the head is **not masked**,
764
+ - 0 indicates the head is **masked**.
765
+
766
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
767
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
768
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
769
+ model's internal embedding lookup matrix.
770
+ encoder_hidden_states (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
771
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
772
+ the model is configured as a decoder.
773
+ encoder_attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
774
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
775
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
776
+
777
+ - 1 for tokens that are **not masked**,
778
+ - 0 for tokens that are **masked**.
779
+
780
+ output_attentions (`bool`, *optional*):
781
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
782
+ tensors for more detail.
783
+ output_hidden_states (`bool`, *optional*):
784
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
785
+ more detail.
786
+ return_dict (`bool`, *optional*):
787
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
788
+ """
789
+
790
+
791
+ @add_start_docstrings(
792
+ "The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to "
793
+ "the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the "
794
+ "hidden size and embedding size are different. "
795
+ ""
796
+ "Both the generator and discriminator checkpoints may be loaded into this model.",
797
+ ELECTRA_START_DOCSTRING,
798
+ )
799
+ class ElectraModel(ElectraPreTrainedModel):
800
+ def __init__(self, config):
801
+ super().__init__(config)
802
+ self.embeddings = ElectraEmbeddings(config)
803
+
804
+ if config.embedding_size != config.hidden_size:
805
+ self.embeddings_project = nn.Linear(config.embedding_size, config.hidden_size)
806
+
807
+ self.encoder = ElectraEncoder(config)
808
+ self.config = config
809
+ # Initialize weights and apply final processing
810
+ self.post_init()
811
+
812
+ def get_input_embeddings(self):
813
+ return self.embeddings.word_embeddings
814
+
815
+ def set_input_embeddings(self, value):
816
+ self.embeddings.word_embeddings = value
817
+
818
+ def _prune_heads(self, heads_to_prune):
819
+ """
820
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
821
+ class PreTrainedModel
822
+ """
823
+ for layer, heads in heads_to_prune.items():
824
+ self.encoder.layer[layer].attention.prune_heads(heads)
825
+
826
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
827
+ @add_code_sample_docstrings(
828
+ checkpoint=_CHECKPOINT_FOR_DOC,
829
+ output_type=BaseModelOutputWithCrossAttentions,
830
+ config_class=_CONFIG_FOR_DOC,
831
+ )
832
+ def forward(
833
+ self,
834
+ input_ids: Optional[torch.Tensor] = None,
835
+ attention_mask: Optional[torch.Tensor] = None,
836
+ token_type_ids: Optional[torch.Tensor] = None,
837
+ position_ids: Optional[torch.Tensor] = None,
838
+ head_mask: Optional[torch.Tensor] = None,
839
+ inputs_embeds: Optional[torch.Tensor] = None,
840
+ encoder_hidden_states: Optional[torch.Tensor] = None,
841
+ encoder_attention_mask: Optional[torch.Tensor] = None,
842
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
843
+ use_cache: Optional[bool] = None,
844
+ output_attentions: Optional[bool] = None,
845
+ output_hidden_states: Optional[bool] = None,
846
+ return_dict: Optional[bool] = None,
847
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithCrossAttentions]:
848
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
849
+ output_hidden_states = (
850
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
851
+ )
852
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
853
+
854
+ if input_ids is not None and inputs_embeds is not None:
855
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
856
+ elif input_ids is not None:
857
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
858
+ input_shape = input_ids.size()
859
+ elif inputs_embeds is not None:
860
+ input_shape = inputs_embeds.size()[:-1]
861
+ else:
862
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
863
+
864
+ batch_size, seq_length = input_shape
865
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
866
+
867
+ # past_key_values_length
868
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
869
+
870
+ if attention_mask is None:
871
+ attention_mask = torch.ones(input_shape, device=device)
872
+ if token_type_ids is None:
873
+ if hasattr(self.embeddings, "token_type_ids"):
874
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
875
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
876
+ token_type_ids = buffered_token_type_ids_expanded
877
+ else:
878
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
879
+
880
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
881
+
882
+ # If a 2D or 3D attention mask is provided for the cross-attention
883
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
884
+ if self.config.is_decoder and encoder_hidden_states is not None:
885
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
886
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
887
+ if encoder_attention_mask is None:
888
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
889
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
890
+ else:
891
+ encoder_extended_attention_mask = None
892
+
893
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
894
+
895
+ hidden_states = self.embeddings(
896
+ input_ids=input_ids,
897
+ position_ids=position_ids,
898
+ token_type_ids=token_type_ids,
899
+ inputs_embeds=inputs_embeds,
900
+ past_key_values_length=past_key_values_length,
901
+ )
902
+
903
+ if hasattr(self, "embeddings_project"):
904
+ hidden_states = self.embeddings_project(hidden_states)
905
+
906
+ hidden_states = self.encoder(
907
+ hidden_states,
908
+ attention_mask=extended_attention_mask,
909
+ head_mask=head_mask,
910
+ encoder_hidden_states=encoder_hidden_states,
911
+ encoder_attention_mask=encoder_extended_attention_mask,
912
+ past_key_values=past_key_values,
913
+ use_cache=use_cache,
914
+ output_attentions=output_attentions,
915
+ output_hidden_states=output_hidden_states,
916
+ return_dict=return_dict,
917
+ )
918
+
919
+ return hidden_states
920
+
921
+
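Because `ElectraModel` returns the encoder output directly (there is no pooling layer), the quantity to read off is `last_hidden_state`. A minimal usage sketch, assuming the `google/electra-small-discriminator` checkpoint referenced elsewhere in these files:

```python
import torch
from transformers import AutoTokenizer, ElectraModel

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
model = ElectraModel.from_pretrained("google/electra-small-discriminator")

inputs = tokenizer("ELECTRA detects replaced tokens.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# (batch_size, sequence_length, hidden_size)
print(outputs.last_hidden_state.shape)
```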
922
+ class ElectraClassificationHead(nn.Module):
923
+ """Head for sentence-level classification tasks."""
924
+
925
+ def __init__(self, config):
926
+ super().__init__()
927
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
928
+ classifier_dropout = (
929
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
930
+ )
931
+ self.activation = get_activation("gelu")
932
+ self.dropout = nn.Dropout(classifier_dropout)
933
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
934
+
935
+ def forward(self, features, **kwargs):
936
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
937
+ x = self.dropout(x)
938
+ x = self.dense(x)
939
+ x = self.activation(x) # although BERT uses tanh here, it seems Electra authors used gelu here
940
+ x = self.dropout(x)
941
+ x = self.out_proj(x)
942
+ return x
943
+
944
+
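The head only consumes the hidden state at position 0 (the `[CLS]` slot), so its output drops the sequence dimension. A small shape sketch with a toy config (sizes are illustrative):

```python
import torch
from transformers import ElectraConfig
from transformers.models.electra.modeling_electra import ElectraClassificationHead

config = ElectraConfig(hidden_size=64, num_labels=3)  # toy sizes
head = ElectraClassificationHead(config)

sequence_output = torch.randn(2, 10, config.hidden_size)  # (batch, seq_len, hidden)
print(head(sequence_output).shape)  # torch.Size([2, 3]); only the first token feeds the head
```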
945
+ @add_start_docstrings(
946
+ """
947
+ ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the
948
+ pooled output) e.g. for GLUE tasks.
949
+ """,
950
+ ELECTRA_START_DOCSTRING,
951
+ )
952
+ class ElectraForSequenceClassification(ElectraPreTrainedModel):
953
+ def __init__(self, config):
954
+ super().__init__(config)
955
+ self.num_labels = config.num_labels
956
+ self.config = config
957
+ self.electra = ElectraModel(config)
958
+ self.classifier = ElectraClassificationHead(config)
959
+
960
+ # Initialize weights and apply final processing
961
+ self.post_init()
962
+
963
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
964
+ @add_code_sample_docstrings(
965
+ checkpoint="bhadresh-savani/electra-base-emotion",
966
+ output_type=SequenceClassifierOutput,
967
+ config_class=_CONFIG_FOR_DOC,
968
+ expected_output="'joy'",
969
+ expected_loss=0.06,
970
+ )
971
+ def forward(
972
+ self,
973
+ input_ids: Optional[torch.Tensor] = None,
974
+ attention_mask: Optional[torch.Tensor] = None,
975
+ token_type_ids: Optional[torch.Tensor] = None,
976
+ position_ids: Optional[torch.Tensor] = None,
977
+ head_mask: Optional[torch.Tensor] = None,
978
+ inputs_embeds: Optional[torch.Tensor] = None,
979
+ labels: Optional[torch.Tensor] = None,
980
+ output_attentions: Optional[bool] = None,
981
+ output_hidden_states: Optional[bool] = None,
982
+ return_dict: Optional[bool] = None,
983
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
984
+ r"""
985
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
986
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
987
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
988
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
989
+ """
990
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
991
+
992
+ discriminator_hidden_states = self.electra(
993
+ input_ids,
994
+ attention_mask=attention_mask,
995
+ token_type_ids=token_type_ids,
996
+ position_ids=position_ids,
997
+ head_mask=head_mask,
998
+ inputs_embeds=inputs_embeds,
999
+ output_attentions=output_attentions,
1000
+ output_hidden_states=output_hidden_states,
1001
+ return_dict=return_dict,
1002
+ )
1003
+
1004
+ sequence_output = discriminator_hidden_states[0]
1005
+ logits = self.classifier(sequence_output)
1006
+
1007
+ loss = None
1008
+ if labels is not None:
1009
+ if self.config.problem_type is None:
1010
+ if self.num_labels == 1:
1011
+ self.config.problem_type = "regression"
1012
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1013
+ self.config.problem_type = "single_label_classification"
1014
+ else:
1015
+ self.config.problem_type = "multi_label_classification"
1016
+
1017
+ if self.config.problem_type == "regression":
1018
+ loss_fct = MSELoss()
1019
+ if self.num_labels == 1:
1020
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1021
+ else:
1022
+ loss = loss_fct(logits, labels)
1023
+ elif self.config.problem_type == "single_label_classification":
1024
+ loss_fct = CrossEntropyLoss()
1025
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1026
+ elif self.config.problem_type == "multi_label_classification":
1027
+ loss_fct = BCEWithLogitsLoss()
1028
+ loss = loss_fct(logits, labels)
1029
+
1030
+ if not return_dict:
1031
+ output = (logits,) + discriminator_hidden_states[1:]
1032
+ return ((loss,) + output) if loss is not None else output
1033
+
1034
+ return SequenceClassifierOutput(
1035
+ loss=loss,
1036
+ logits=logits,
1037
+ hidden_states=discriminator_hidden_states.hidden_states,
1038
+ attentions=discriminator_hidden_states.attentions,
1039
+ )
1040
+
1041
+
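A minimal inference sketch for the class above, using the `bhadresh-savani/electra-base-emotion` checkpoint named in its docstring decorator (the input sentence is arbitrary):

```python
import torch
from transformers import AutoTokenizer, ElectraForSequenceClassification

checkpoint = "bhadresh-savani/electra-base-emotion"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = ElectraForSequenceClassification.from_pretrained(checkpoint)

inputs = tokenizer("I am thrilled with these results!", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (batch_size, num_labels)

predicted_id = logits.argmax(dim=-1).item()
print(model.config.id2label[predicted_id])
```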
1042
+ @add_start_docstrings(
1043
+ """
1044
+ Electra model with a binary classification head on top as used during pretraining for identifying generated tokens.
1045
+
1046
+ It is recommended to load the discriminator checkpoint into that model.
1047
+ """,
1048
+ ELECTRA_START_DOCSTRING,
1049
+ )
1050
+ class ElectraForPreTraining(ElectraPreTrainedModel):
1051
+ def __init__(self, config):
1052
+ super().__init__(config)
1053
+
1054
+ self.electra = ElectraModel(config)
1055
+ self.discriminator_predictions = ElectraDiscriminatorPredictions(config)
1056
+ # Initialize weights and apply final processing
1057
+ self.post_init()
1058
+
1059
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1060
+ @replace_return_docstrings(output_type=ElectraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1061
+ def forward(
1062
+ self,
1063
+ input_ids: Optional[torch.Tensor] = None,
1064
+ attention_mask: Optional[torch.Tensor] = None,
1065
+ token_type_ids: Optional[torch.Tensor] = None,
1066
+ position_ids: Optional[torch.Tensor] = None,
1067
+ head_mask: Optional[torch.Tensor] = None,
1068
+ inputs_embeds: Optional[torch.Tensor] = None,
1069
+ labels: Optional[torch.Tensor] = None,
1070
+ output_attentions: Optional[bool] = None,
1071
+ output_hidden_states: Optional[bool] = None,
1072
+ return_dict: Optional[bool] = None,
1073
+ ) -> Union[Tuple[torch.Tensor], ElectraForPreTrainingOutput]:
1074
+ r"""
1075
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1076
+ Labels for computing the ELECTRA loss. Input should be a sequence of tokens (see `input_ids` docstring).
1077
+ Indices should be in `[0, 1]`:
1078
+
1079
+ - 0 indicates the token is an original token,
1080
+ - 1 indicates the token was replaced.
1081
+
1082
+ Returns:
1083
+
1084
+ Examples:
1085
+
1086
+ ```python
1087
+ >>> from transformers import ElectraForPreTraining, AutoTokenizer
1088
+ >>> import torch
1089
+
1090
+ >>> discriminator = ElectraForPreTraining.from_pretrained("google/electra-base-discriminator")
1091
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/electra-base-discriminator")
1092
+
1093
+ >>> sentence = "The quick brown fox jumps over the lazy dog"
1094
+ >>> fake_sentence = "The quick brown fox fake over the lazy dog"
1095
+
1096
+ >>> fake_tokens = tokenizer.tokenize(fake_sentence, add_special_tokens=True)
1097
+ >>> fake_inputs = tokenizer.encode(fake_sentence, return_tensors="pt")
1098
+ >>> discriminator_outputs = discriminator(fake_inputs)
1099
+ >>> predictions = torch.round((torch.sign(discriminator_outputs[0]) + 1) / 2)
1100
+
1101
+ >>> fake_tokens
1102
+ ['[CLS]', 'the', 'quick', 'brown', 'fox', 'fake', 'over', 'the', 'lazy', 'dog', '[SEP]']
1103
+
1104
+ >>> predictions.squeeze().tolist()
1105
+ [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]
1106
+ ```"""
1107
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1108
+
1109
+ discriminator_hidden_states = self.electra(
1110
+ input_ids,
1111
+ attention_mask=attention_mask,
1112
+ token_type_ids=token_type_ids,
1113
+ position_ids=position_ids,
1114
+ head_mask=head_mask,
1115
+ inputs_embeds=inputs_embeds,
1116
+ output_attentions=output_attentions,
1117
+ output_hidden_states=output_hidden_states,
1118
+ return_dict=return_dict,
1119
+ )
1120
+ discriminator_sequence_output = discriminator_hidden_states[0]
1121
+
1122
+ logits = self.discriminator_predictions(discriminator_sequence_output)
1123
+
1124
+ loss = None
1125
+ if labels is not None:
1126
+ loss_fct = nn.BCEWithLogitsLoss()
1127
+ if attention_mask is not None:
1128
+ active_loss = attention_mask.view(-1, discriminator_sequence_output.shape[1]) == 1
1129
+ active_logits = logits.view(-1, discriminator_sequence_output.shape[1])[active_loss]
1130
+ active_labels = labels[active_loss]
1131
+ loss = loss_fct(active_logits, active_labels.float())
1132
+ else:
1133
+ loss = loss_fct(logits.view(-1, discriminator_sequence_output.shape[1]), labels.float())
1134
+
1135
+ if not return_dict:
1136
+ output = (logits,) + discriminator_hidden_states[1:]
1137
+ return ((loss,) + output) if loss is not None else output
1138
+
1139
+ return ElectraForPreTrainingOutput(
1140
+ loss=loss,
1141
+ logits=logits,
1142
+ hidden_states=discriminator_hidden_states.hidden_states,
1143
+ attentions=discriminator_hidden_states.attentions,
1144
+ )
1145
+
1146
+
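The docstring example above only runs inference; when `labels` are passed, the forward applies a per-token `BCEWithLogitsLoss`, restricted to non-padding positions when an `attention_mask` is given. A sketch of supplying replaced-token labels (the label vector below is hand-built to match the fake sentence from the docstring):

```python
import torch
from transformers import AutoTokenizer, ElectraForPreTraining

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
model = ElectraForPreTraining.from_pretrained("google/electra-small-discriminator")

inputs = tokenizer("The quick brown fox fake over the lazy dog", return_tensors="pt")

# 1 marks a replaced token ("fake" sits at position 5 after [CLS]), 0 marks originals
labels = torch.zeros_like(inputs["input_ids"])
labels[0, 5] = 1

outputs = model(**inputs, labels=labels)
print(outputs.loss, outputs.logits.shape)  # scalar BCE loss, logits of shape (1, sequence_length)
```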
1147
+ @add_start_docstrings(
1148
+ """
1149
+ Electra model with a language modeling head on top.
1150
+
1151
+ Even though both the discriminator and generator may be loaded into this model, the generator is the only model of
1152
+ the two to have been trained for the masked language modeling task.
1153
+ """,
1154
+ ELECTRA_START_DOCSTRING,
1155
+ )
1156
+ class ElectraForMaskedLM(ElectraPreTrainedModel):
1157
+ _tied_weights_keys = ["generator_lm_head.weight"]
1158
+
1159
+ def __init__(self, config):
1160
+ super().__init__(config)
1161
+
1162
+ self.electra = ElectraModel(config)
1163
+ self.generator_predictions = ElectraGeneratorPredictions(config)
1164
+
1165
+ self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size)
1166
+ # Initialize weights and apply final processing
1167
+ self.post_init()
1168
+
1169
+ def get_output_embeddings(self):
1170
+ return self.generator_lm_head
1171
+
1172
+ def set_output_embeddings(self, word_embeddings):
1173
+ self.generator_lm_head = word_embeddings
1174
+
1175
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1176
+ @add_code_sample_docstrings(
1177
+ checkpoint="google/electra-small-generator",
1178
+ output_type=MaskedLMOutput,
1179
+ config_class=_CONFIG_FOR_DOC,
1180
+ mask="[MASK]",
1181
+ expected_output="'paris'",
1182
+ expected_loss=1.22,
1183
+ )
1184
+ def forward(
1185
+ self,
1186
+ input_ids: Optional[torch.Tensor] = None,
1187
+ attention_mask: Optional[torch.Tensor] = None,
1188
+ token_type_ids: Optional[torch.Tensor] = None,
1189
+ position_ids: Optional[torch.Tensor] = None,
1190
+ head_mask: Optional[torch.Tensor] = None,
1191
+ inputs_embeds: Optional[torch.Tensor] = None,
1192
+ labels: Optional[torch.Tensor] = None,
1193
+ output_attentions: Optional[bool] = None,
1194
+ output_hidden_states: Optional[bool] = None,
1195
+ return_dict: Optional[bool] = None,
1196
+ ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
1197
+ r"""
1198
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1199
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1200
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the
1201
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1202
+ """
1203
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1204
+
1205
+ generator_hidden_states = self.electra(
1206
+ input_ids,
1207
+ attention_mask=attention_mask,
1208
+ token_type_ids=token_type_ids,
1209
+ position_ids=position_ids,
1210
+ head_mask=head_mask,
1211
+ inputs_embeds=inputs_embeds,
1212
+ output_attentions=output_attentions,
1213
+ output_hidden_states=output_hidden_states,
1214
+ return_dict=return_dict,
1215
+ )
1216
+ generator_sequence_output = generator_hidden_states[0]
1217
+
1218
+ prediction_scores = self.generator_predictions(generator_sequence_output)
1219
+ prediction_scores = self.generator_lm_head(prediction_scores)
1220
+
1221
+ loss = None
1222
+ # Masked language modeling softmax layer
1223
+ if labels is not None:
1224
+ loss_fct = nn.CrossEntropyLoss() # -100 index = padding token
1225
+ loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1226
+
1227
+ if not return_dict:
1228
+ output = (prediction_scores,) + generator_hidden_states[1:]
1229
+ return ((loss,) + output) if loss is not None else output
1230
+
1231
+ return MaskedLMOutput(
1232
+ loss=loss,
1233
+ logits=prediction_scores,
1234
+ hidden_states=generator_hidden_states.hidden_states,
1235
+ attentions=generator_hidden_states.attentions,
1236
+ )
1237
+
1238
+
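A minimal fill-mask sketch for the generator head above, using the `google/electra-small-generator` checkpoint from its docstring decorator:

```python
import torch
from transformers import AutoTokenizer, ElectraForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-generator")
model = ElectraForMaskedLM.from_pretrained("google/electra-small-generator")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (batch_size, seq_len, vocab_size)

mask_positions = (inputs["input_ids"] == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
predicted_id = logits[0, mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_id))  # the decorator above expects "paris" for this setup
```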
1239
+ @add_start_docstrings(
1240
+ """
1241
+ Electra model with a token classification head on top.
1242
+
1243
+ Both the discriminator and generator may be loaded into this model.
1244
+ """,
1245
+ ELECTRA_START_DOCSTRING,
1246
+ )
1247
+ class ElectraForTokenClassification(ElectraPreTrainedModel):
1248
+ def __init__(self, config):
1249
+ super().__init__(config)
1250
+ self.num_labels = config.num_labels
1251
+
1252
+ self.electra = ElectraModel(config)
1253
+ classifier_dropout = (
1254
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1255
+ )
1256
+ self.dropout = nn.Dropout(classifier_dropout)
1257
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1258
+ # Initialize weights and apply final processing
1259
+ self.post_init()
1260
+
1261
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1262
+ @add_code_sample_docstrings(
1263
+ checkpoint="bhadresh-savani/electra-base-discriminator-finetuned-conll03-english",
1264
+ output_type=TokenClassifierOutput,
1265
+ config_class=_CONFIG_FOR_DOC,
1266
+ expected_output="['B-LOC', 'B-ORG', 'O', 'O', 'O', 'O', 'O', 'B-LOC', 'O', 'B-LOC', 'I-LOC']",
1267
+ expected_loss=0.11,
1268
+ )
1269
+ def forward(
1270
+ self,
1271
+ input_ids: Optional[torch.Tensor] = None,
1272
+ attention_mask: Optional[torch.Tensor] = None,
1273
+ token_type_ids: Optional[torch.Tensor] = None,
1274
+ position_ids: Optional[torch.Tensor] = None,
1275
+ head_mask: Optional[torch.Tensor] = None,
1276
+ inputs_embeds: Optional[torch.Tensor] = None,
1277
+ labels: Optional[torch.Tensor] = None,
1278
+ output_attentions: Optional[bool] = None,
1279
+ output_hidden_states: Optional[bool] = None,
1280
+ return_dict: Optional[bool] = None,
1281
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1282
+ r"""
1283
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1284
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1285
+ """
1286
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1287
+
1288
+ discriminator_hidden_states = self.electra(
1289
+ input_ids,
1290
+ attention_mask=attention_mask,
1291
+ token_type_ids=token_type_ids,
1292
+ position_ids=position_ids,
1293
+ head_mask=head_mask,
1294
+ inputs_embeds=inputs_embeds,
1295
+ output_attentions=output_attentions,
1296
+ output_hidden_states=output_hidden_states,
1297
+ return_dict=return_dict,
1298
+ )
1299
+ discriminator_sequence_output = discriminator_hidden_states[0]
1300
+
1301
+ discriminator_sequence_output = self.dropout(discriminator_sequence_output)
1302
+ logits = self.classifier(discriminator_sequence_output)
1303
+
1304
+ loss = None
1305
+ if labels is not None:
1306
+ loss_fct = CrossEntropyLoss()
1307
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1308
+
1309
+ if not return_dict:
1310
+ output = (logits,) + discriminator_hidden_states[1:]
1311
+ return ((loss,) + output) if loss is not None else output
1312
+
1313
+ return TokenClassifierOutput(
1314
+ loss=loss,
1315
+ logits=logits,
1316
+ hidden_states=discriminator_hidden_states.hidden_states,
1317
+ attentions=discriminator_hidden_states.attentions,
1318
+ )
1319
+
1320
+
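A minimal NER-style sketch for the class above, using the CoNLL-2003 checkpoint named in its docstring decorator; whether a given token maps to a sensible tag depends entirely on that checkpoint's label map:

```python
import torch
from transformers import AutoTokenizer, ElectraForTokenClassification

checkpoint = "bhadresh-savani/electra-base-discriminator-finetuned-conll03-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = ElectraForTokenClassification.from_pretrained(checkpoint)

inputs = tokenizer("HuggingFace is based in New York City", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (batch_size, seq_len, num_labels)

predicted_ids = logits.argmax(dim=-1)[0]
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
for token, label_id in zip(tokens, predicted_ids):
    print(token, model.config.id2label[label_id.item()])
```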
1321
+ @add_start_docstrings(
1322
+ """
1323
+ ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1324
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1325
+ """,
1326
+ ELECTRA_START_DOCSTRING,
1327
+ )
1328
+ class ElectraForQuestionAnswering(ElectraPreTrainedModel):
1329
+ config_class = ElectraConfig
1330
+ base_model_prefix = "electra"
1331
+
1332
+ def __init__(self, config):
1333
+ super().__init__(config)
1334
+ self.num_labels = config.num_labels
1335
+
1336
+ self.electra = ElectraModel(config)
1337
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1338
+
1339
+ # Initialize weights and apply final processing
1340
+ self.post_init()
1341
+
1342
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1343
+ @add_code_sample_docstrings(
1344
+ checkpoint="bhadresh-savani/electra-base-squad2",
1345
+ output_type=QuestionAnsweringModelOutput,
1346
+ config_class=_CONFIG_FOR_DOC,
1347
+ qa_target_start_index=11,
1348
+ qa_target_end_index=12,
1349
+ expected_output="'a nice puppet'",
1350
+ expected_loss=2.64,
1351
+ )
1352
+ def forward(
1353
+ self,
1354
+ input_ids: Optional[torch.Tensor] = None,
1355
+ attention_mask: Optional[torch.Tensor] = None,
1356
+ token_type_ids: Optional[torch.Tensor] = None,
1357
+ position_ids: Optional[torch.Tensor] = None,
1358
+ head_mask: Optional[torch.Tensor] = None,
1359
+ inputs_embeds: Optional[torch.Tensor] = None,
1360
+ start_positions: Optional[torch.Tensor] = None,
1361
+ end_positions: Optional[torch.Tensor] = None,
1362
+ output_attentions: Optional[bool] = None,
1363
+ output_hidden_states: Optional[bool] = None,
1364
+ return_dict: Optional[bool] = None,
1365
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
1366
+ r"""
1367
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1368
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1369
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1370
+ are not taken into account for computing the loss.
1371
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1372
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1373
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1374
+ are not taken into account for computing the loss.
1375
+ """
1376
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1377
+
1378
+ discriminator_hidden_states = self.electra(
1379
+ input_ids,
1380
+ attention_mask=attention_mask,
1381
+ token_type_ids=token_type_ids,
1382
+ position_ids=position_ids,
1383
+ head_mask=head_mask,
1384
+ inputs_embeds=inputs_embeds,
1385
+ output_attentions=output_attentions,
1386
+ output_hidden_states=output_hidden_states,
1387
+ )
1388
+
1389
+ sequence_output = discriminator_hidden_states[0]
1390
+
1391
+ logits = self.qa_outputs(sequence_output)
1392
+ start_logits, end_logits = logits.split(1, dim=-1)
1393
+ start_logits = start_logits.squeeze(-1).contiguous()
1394
+ end_logits = end_logits.squeeze(-1).contiguous()
1395
+
1396
+ total_loss = None
1397
+ if start_positions is not None and end_positions is not None:
1398
+ # If we are on multi-GPU, the start/end positions may come with an extra dimension; squeeze it
1399
+ if len(start_positions.size()) > 1:
1400
+ start_positions = start_positions.squeeze(-1)
1401
+ if len(end_positions.size()) > 1:
1402
+ end_positions = end_positions.squeeze(-1)
1403
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1404
+ ignored_index = start_logits.size(1)
1405
+ start_positions = start_positions.clamp(0, ignored_index)
1406
+ end_positions = end_positions.clamp(0, ignored_index)
1407
+
1408
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1409
+ start_loss = loss_fct(start_logits, start_positions)
1410
+ end_loss = loss_fct(end_logits, end_positions)
1411
+ total_loss = (start_loss + end_loss) / 2
1412
+
1413
+ if not return_dict:
1414
+ output = (
1415
+ start_logits,
1416
+ end_logits,
1417
+ ) + discriminator_hidden_states[1:]
1418
+ return ((total_loss,) + output) if total_loss is not None else output
1419
+
1420
+ return QuestionAnsweringModelOutput(
1421
+ loss=total_loss,
1422
+ start_logits=start_logits,
1423
+ end_logits=end_logits,
1424
+ hidden_states=discriminator_hidden_states.hidden_states,
1425
+ attentions=discriminator_hidden_states.attentions,
1426
+ )
1427
+
1428
+
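A minimal extractive-QA sketch for the class above, using the SQuAD2 checkpoint from its docstring decorator; the answer span is recovered by taking the argmax of the start and end logits:

```python
import torch
from transformers import AutoTokenizer, ElectraForQuestionAnswering

checkpoint = "bhadresh-savani/electra-base-squad2"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = ElectraForQuestionAnswering.from_pretrained(checkpoint)

question = "Who was Jim Henson?"
context = "Jim Henson was a nice puppet"
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

start = outputs.start_logits.argmax()
end = outputs.end_logits.argmax()
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))
```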
1429
+ @add_start_docstrings(
1430
+ """
1431
+ ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1432
+ softmax) e.g. for RocStories/SWAG tasks.
1433
+ """,
1434
+ ELECTRA_START_DOCSTRING,
1435
+ )
1436
+ class ElectraForMultipleChoice(ElectraPreTrainedModel):
1437
+ def __init__(self, config):
1438
+ super().__init__(config)
1439
+
1440
+ self.electra = ElectraModel(config)
1441
+ self.sequence_summary = SequenceSummary(config)
1442
+ self.classifier = nn.Linear(config.hidden_size, 1)
1443
+
1444
+ # Initialize weights and apply final processing
1445
+ self.post_init()
1446
+
1447
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1448
+ @add_code_sample_docstrings(
1449
+ checkpoint=_CHECKPOINT_FOR_DOC,
1450
+ output_type=MultipleChoiceModelOutput,
1451
+ config_class=_CONFIG_FOR_DOC,
1452
+ )
1453
+ def forward(
1454
+ self,
1455
+ input_ids: Optional[torch.Tensor] = None,
1456
+ attention_mask: Optional[torch.Tensor] = None,
1457
+ token_type_ids: Optional[torch.Tensor] = None,
1458
+ position_ids: Optional[torch.Tensor] = None,
1459
+ head_mask: Optional[torch.Tensor] = None,
1460
+ inputs_embeds: Optional[torch.Tensor] = None,
1461
+ labels: Optional[torch.Tensor] = None,
1462
+ output_attentions: Optional[bool] = None,
1463
+ output_hidden_states: Optional[bool] = None,
1464
+ return_dict: Optional[bool] = None,
1465
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
1466
+ r"""
1467
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1468
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1469
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1470
+ `input_ids` above)
1471
+ """
1472
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1473
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1474
+
1475
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1476
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1477
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1478
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1479
+ inputs_embeds = (
1480
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1481
+ if inputs_embeds is not None
1482
+ else None
1483
+ )
1484
+
1485
+ discriminator_hidden_states = self.electra(
1486
+ input_ids,
1487
+ attention_mask=attention_mask,
1488
+ token_type_ids=token_type_ids,
1489
+ position_ids=position_ids,
1490
+ head_mask=head_mask,
1491
+ inputs_embeds=inputs_embeds,
1492
+ output_attentions=output_attentions,
1493
+ output_hidden_states=output_hidden_states,
1494
+ return_dict=return_dict,
1495
+ )
1496
+
1497
+ sequence_output = discriminator_hidden_states[0]
1498
+
1499
+ pooled_output = self.sequence_summary(sequence_output)
1500
+ logits = self.classifier(pooled_output)
1501
+ reshaped_logits = logits.view(-1, num_choices)
1502
+
1503
+ loss = None
1504
+ if labels is not None:
1505
+ loss_fct = CrossEntropyLoss()
1506
+ loss = loss_fct(reshaped_logits, labels)
1507
+
1508
+ if not return_dict:
1509
+ output = (reshaped_logits,) + discriminator_hidden_states[1:]
1510
+ return ((loss,) + output) if loss is not None else output
1511
+
1512
+ return MultipleChoiceModelOutput(
1513
+ loss=loss,
1514
+ logits=reshaped_logits,
1515
+ hidden_states=discriminator_hidden_states.hidden_states,
1516
+ attentions=discriminator_hidden_states.attentions,
1517
+ )
1518
+
1519
+
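The forward above expects inputs of shape `(batch_size, num_choices, sequence_length)`, flattens them to `(batch_size * num_choices, sequence_length)` before the encoder, and reshapes the per-choice scores back. A sketch of preparing that layout; note that loading a base discriminator checkpoint leaves the choice classifier randomly initialized, so the scores are only meaningful after fine-tuning:

```python
import torch
from transformers import AutoTokenizer, ElectraForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
model = ElectraForMultipleChoice.from_pretrained("google/electra-small-discriminator")

prompt = "The cat sat on the"
choices = ["mat.", "stratosphere."]
encoding = tokenizer([prompt, prompt], choices, return_tensors="pt", padding=True)

# add the num_choices dimension: (batch_size=1, num_choices=2, sequence_length)
inputs = {k: v.unsqueeze(0) for k, v in encoding.items()}
outputs = model(**inputs, labels=torch.tensor([0]))
print(outputs.loss, outputs.logits.shape)  # cross-entropy loss, logits of shape (1, 2)
```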
1520
+ @add_start_docstrings(
1521
+ """ELECTRA Model with a `language modeling` head on top for CLM fine-tuning.""", ELECTRA_START_DOCSTRING
1522
+ )
1523
+ class ElectraForCausalLM(ElectraPreTrainedModel):
1524
+ _tied_weights_keys = ["generator_lm_head.weight"]
1525
+
1526
+ def __init__(self, config):
1527
+ super().__init__(config)
1528
+
1529
+ if not config.is_decoder:
1530
+ logger.warning("If you want to use `ElectraForCausalLM` as a standalone, add `is_decoder=True`.")
1531
+
1532
+ self.electra = ElectraModel(config)
1533
+ self.generator_predictions = ElectraGeneratorPredictions(config)
1534
+ self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size)
1535
+
1536
+ self.init_weights()
1537
+
1538
+ def get_output_embeddings(self):
1539
+ return self.generator_lm_head
1540
+
1541
+ def set_output_embeddings(self, new_embeddings):
1542
+ self.generator_lm_head = new_embeddings
1543
+
1544
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1545
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
1546
+ def forward(
1547
+ self,
1548
+ input_ids: Optional[torch.Tensor] = None,
1549
+ attention_mask: Optional[torch.Tensor] = None,
1550
+ token_type_ids: Optional[torch.Tensor] = None,
1551
+ position_ids: Optional[torch.Tensor] = None,
1552
+ head_mask: Optional[torch.Tensor] = None,
1553
+ inputs_embeds: Optional[torch.Tensor] = None,
1554
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1555
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1556
+ labels: Optional[torch.Tensor] = None,
1557
+ past_key_values: Optional[List[torch.Tensor]] = None,
1558
+ use_cache: Optional[bool] = None,
1559
+ output_attentions: Optional[bool] = None,
1560
+ output_hidden_states: Optional[bool] = None,
1561
+ return_dict: Optional[bool] = None,
1562
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
1563
+ r"""
1564
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1565
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1566
+ the model is configured as a decoder.
1567
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1568
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1569
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1570
+
1571
+ - 1 for tokens that are **not masked**,
1572
+ - 0 for tokens that are **masked**.
1573
+
1574
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1575
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1576
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
1577
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1578
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1579
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1580
+
1581
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1582
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1583
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1584
+ use_cache (`bool`, *optional*):
1585
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1586
+ `past_key_values`).
1587
+
1588
+ Returns:
1589
+
1590
+ Example:
1591
+
1592
+ ```python
1593
+ >>> from transformers import AutoTokenizer, ElectraForCausalLM, ElectraConfig
1594
+ >>> import torch
1595
+
1596
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/electra-base-generator")
1597
+ >>> config = ElectraConfig.from_pretrained("google/electra-base-generator")
1598
+ >>> config.is_decoder = True
1599
+ >>> model = ElectraForCausalLM.from_pretrained("google/electra-base-generator", config=config)
1600
+
1601
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1602
+ >>> outputs = model(**inputs)
1603
+
1604
+ >>> prediction_logits = outputs.logits
1605
+ ```"""
1606
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1607
+ if labels is not None:
1608
+ use_cache = False
1609
+
1610
+ outputs = self.electra(
1611
+ input_ids,
1612
+ attention_mask=attention_mask,
1613
+ token_type_ids=token_type_ids,
1614
+ position_ids=position_ids,
1615
+ head_mask=head_mask,
1616
+ inputs_embeds=inputs_embeds,
1617
+ encoder_hidden_states=encoder_hidden_states,
1618
+ encoder_attention_mask=encoder_attention_mask,
1619
+ past_key_values=past_key_values,
1620
+ use_cache=use_cache,
1621
+ output_attentions=output_attentions,
1622
+ output_hidden_states=output_hidden_states,
1623
+ return_dict=return_dict,
1624
+ )
1625
+
1626
+ sequence_output = outputs[0]
1627
+ prediction_scores = self.generator_lm_head(self.generator_predictions(sequence_output))
1628
+
1629
+ lm_loss = None
1630
+ if labels is not None:
1631
+ # we are doing next-token prediction; shift prediction scores and input ids by one
1632
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
1633
+ labels = labels[:, 1:].contiguous()
1634
+ loss_fct = CrossEntropyLoss()
1635
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1636
+
1637
+ if not return_dict:
1638
+ output = (prediction_scores,) + outputs[1:]
1639
+ return ((lm_loss,) + output) if lm_loss is not None else output
1640
+
1641
+ return CausalLMOutputWithCrossAttentions(
1642
+ loss=lm_loss,
1643
+ logits=prediction_scores,
1644
+ past_key_values=outputs.past_key_values,
1645
+ hidden_states=outputs.hidden_states,
1646
+ attentions=outputs.attentions,
1647
+ cross_attentions=outputs.cross_attentions,
1648
+ )
1649
+
1650
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaForCausalLM.prepare_inputs_for_generation
1651
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
1652
+ input_shape = input_ids.shape
1653
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1654
+ if attention_mask is None:
1655
+ attention_mask = input_ids.new_ones(input_shape)
1656
+
1657
+ # cut decoder_input_ids if past_key_values is used
1658
+ if past_key_values is not None:
1659
+ past_length = past_key_values[0][0].shape[2]
1660
+
1661
+ # Some generation methods already pass only the last input ID
1662
+ if input_ids.shape[1] > past_length:
1663
+ remove_prefix_length = past_length
1664
+ else:
1665
+ # Default to old behavior: keep only final ID
1666
+ remove_prefix_length = input_ids.shape[1] - 1
1667
+
1668
+ input_ids = input_ids[:, remove_prefix_length:]
1669
+
1670
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
1671
+
1672
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaForCausalLM._reorder_cache
1673
+ def _reorder_cache(self, past_key_values, beam_idx):
1674
+ reordered_past = ()
1675
+ for layer_past in past_key_values:
1676
+ reordered_past += (
1677
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1678
+ )
1679
+ return reordered_past
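Since `prepare_inputs_for_generation` and `_reorder_cache` are defined above, the class plugs into `generate()`. A sketch that exercises those hooks with the generator checkpoint; the checkpoint was trained for masked language modeling, not left-to-right generation, so the continuation itself will not be meaningful:

```python
import torch
from transformers import AutoTokenizer, ElectraConfig, ElectraForCausalLM

tokenizer = AutoTokenizer.from_pretrained("google/electra-base-generator")
config = ElectraConfig.from_pretrained("google/electra-base-generator")
config.is_decoder = True  # required so the attention mask is made causal
model = ElectraForCausalLM.from_pretrained("google/electra-base-generator", config=config)

inputs = tokenizer("Hello, my dog is", return_tensors="pt")
generated = model.generate(**inputs, max_new_tokens=5, do_sample=False)
print(tokenizer.decode(generated[0], skip_special_tokens=True))
```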
llmeval-env/lib/python3.10/site-packages/transformers/models/electra/modeling_flax_electra.py ADDED
@@ -0,0 +1,1601 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Google Flax Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Callable, Optional, Tuple
17
+
18
+ import flax
19
+ import flax.linen as nn
20
+ import jax
21
+ import jax.numpy as jnp
22
+ import numpy as np
23
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
24
+ from flax.linen import combine_masks, make_causal_mask
25
+ from flax.linen import partitioning as nn_partitioning
26
+ from flax.linen.attention import dot_product_attention_weights
27
+ from flax.traverse_util import flatten_dict, unflatten_dict
28
+ from jax import lax
29
+
30
+ from ...modeling_flax_outputs import (
31
+ FlaxBaseModelOutput,
32
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
33
+ FlaxCausalLMOutputWithCrossAttentions,
34
+ FlaxMaskedLMOutput,
35
+ FlaxMultipleChoiceModelOutput,
36
+ FlaxQuestionAnsweringModelOutput,
37
+ FlaxSequenceClassifierOutput,
38
+ FlaxTokenClassifierOutput,
39
+ )
40
+ from ...modeling_flax_utils import (
41
+ ACT2FN,
42
+ FlaxPreTrainedModel,
43
+ append_call_sample_docstring,
44
+ append_replace_return_docstrings,
45
+ overwrite_call_docstring,
46
+ )
47
+ from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging
48
+ from .configuration_electra import ElectraConfig
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ _CHECKPOINT_FOR_DOC = "google/electra-small-discriminator"
54
+ _CONFIG_FOR_DOC = "ElectraConfig"
55
+
56
+ remat = nn_partitioning.remat
57
+
58
+
59
+ @flax.struct.dataclass
60
+ class FlaxElectraForPreTrainingOutput(ModelOutput):
61
+ """
62
+ Output type of [`ElectraForPreTraining`].
63
+
64
+ Args:
65
+ logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
66
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
67
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
68
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
69
+ `(batch_size, sequence_length, hidden_size)`.
70
+
71
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
72
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
73
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
74
+ sequence_length)`.
75
+
76
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
77
+ heads.
78
+ """
79
+
80
+ logits: jnp.ndarray = None
81
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
82
+ attentions: Optional[Tuple[jnp.ndarray]] = None
83
+
84
+
85
+ ELECTRA_START_DOCSTRING = r"""
86
+
87
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
88
+ library implements for all its models (such as downloading, saving and converting weights from PyTorch models)
89
+
90
+ This model is also a Flax Linen
91
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
92
+ regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
93
+
94
+ Finally, this model supports inherent JAX features such as:
95
+
96
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
97
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
98
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
99
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
100
+
101
+ Parameters:
102
+ config ([`ElectraConfig`]): Model configuration class with all the parameters of the model.
103
+ Initializing with a config file does not load the weights associated with the model, only the
104
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
105
+ """
106
+
107
+ ELECTRA_INPUTS_DOCSTRING = r"""
108
+ Args:
109
+ input_ids (`numpy.ndarray` of shape `({0})`):
110
+ Indices of input sequence tokens in the vocabulary.
111
+
112
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
113
+ [`PreTrainedTokenizer.__call__`] for details.
114
+
115
+ [What are input IDs?](../glossary#input-ids)
116
+ attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
117
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
118
+
119
+ - 1 for tokens that are **not masked**,
120
+ - 0 for tokens that are **masked**.
121
+
122
+ [What are attention masks?](../glossary#attention-mask)
123
+ token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):
124
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
125
+ 1]`:
126
+
127
+ - 0 corresponds to a *sentence A* token,
128
+ - 1 corresponds to a *sentence B* token.
129
+
130
+ [What are token type IDs?](../glossary#token-type-ids)
131
+ position_ids (`numpy.ndarray` of shape `({0})`, *optional*):
132
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
133
+ config.max_position_embeddings - 1]`.
134
+ head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
135
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
136
+
137
+ - 1 indicates the head is **not masked**,
138
+ - 0 indicates the head is **masked**.
139
+
140
+ return_dict (`bool`, *optional*):
141
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
142
+
143
+ """
144
+
145
+
146
+ class FlaxElectraEmbeddings(nn.Module):
147
+ """Construct the embeddings from word, position and token_type embeddings."""
148
+
149
+ config: ElectraConfig
150
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
151
+
152
+ def setup(self):
153
+ self.word_embeddings = nn.Embed(
154
+ self.config.vocab_size,
155
+ self.config.embedding_size,
156
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
157
+ )
158
+ self.position_embeddings = nn.Embed(
159
+ self.config.max_position_embeddings,
160
+ self.config.embedding_size,
161
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
162
+ )
163
+ self.token_type_embeddings = nn.Embed(
164
+ self.config.type_vocab_size,
165
+ self.config.embedding_size,
166
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
167
+ )
168
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
169
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
170
+
171
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEmbeddings.__call__
172
+ def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True):
173
+ # Embed
174
+ inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
175
+ position_embeds = self.position_embeddings(position_ids.astype("i4"))
176
+ token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4"))
177
+
178
+ # Sum all embeddings
179
+ hidden_states = inputs_embeds + token_type_embeddings + position_embeds
180
+
181
+ # Layer Norm
182
+ hidden_states = self.LayerNorm(hidden_states)
183
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
184
+ return hidden_states
185
+
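The Flax embedding module mirrors the PyTorch one: it sums word, position and token-type embeddings at `embedding_size` width, then applies LayerNorm and dropout. A minimal `jax.numpy` sketch of that sum with random toy tables (shapes only; the real module uses learned `nn.Embed` parameters and Flax's `nn.LayerNorm`):

```python
import jax
import jax.numpy as jnp

vocab_size, max_positions, type_vocab, embedding_size = 100, 16, 2, 8
k1, k2, k3 = jax.random.split(jax.random.PRNGKey(0), 3)

word_table = jax.random.normal(k1, (vocab_size, embedding_size))
position_table = jax.random.normal(k2, (max_positions, embedding_size))
type_table = jax.random.normal(k3, (type_vocab, embedding_size))

input_ids = jnp.array([[5, 7, 9]])
position_ids = jnp.arange(input_ids.shape[-1])[None, :]
token_type_ids = jnp.zeros_like(input_ids)

hidden = word_table[input_ids] + position_table[position_ids] + type_table[token_type_ids]
# LayerNorm over the last axis, as in the module above (dropout omitted)
hidden = (hidden - hidden.mean(-1, keepdims=True)) / jnp.sqrt(hidden.var(-1, keepdims=True) + 1e-12)
print(hidden.shape)  # (1, 3, embedding_size)
```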
186
+
187
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfAttention with Bert->Electra
188
+ class FlaxElectraSelfAttention(nn.Module):
189
+ config: ElectraConfig
190
+ causal: bool = False
191
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
192
+
193
+ def setup(self):
194
+ self.head_dim = self.config.hidden_size // self.config.num_attention_heads
195
+ if self.config.hidden_size % self.config.num_attention_heads != 0:
196
+ raise ValueError(
197
+ "`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` "
198
+ " : {self.config.num_attention_heads}"
199
+ )
200
+
201
+ self.query = nn.Dense(
202
+ self.config.hidden_size,
203
+ dtype=self.dtype,
204
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
205
+ )
206
+ self.key = nn.Dense(
207
+ self.config.hidden_size,
208
+ dtype=self.dtype,
209
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
210
+ )
211
+ self.value = nn.Dense(
212
+ self.config.hidden_size,
213
+ dtype=self.dtype,
214
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
215
+ )
216
+
217
+ if self.causal:
218
+ self.causal_mask = make_causal_mask(
219
+ jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
220
+ )
221
+
222
+ def _split_heads(self, hidden_states):
223
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim))
224
+
225
+ def _merge_heads(self, hidden_states):
226
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,))
227
+
228
+ @nn.compact
229
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache
230
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
231
+ """
232
+ This function takes projected key, value states from a single input token and concatenates the states to cached
233
+ states from previous steps. This function is slightly adapted from the official Flax repository:
234
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
235
+ """
236
+ # detect if we're initializing by absence of existing cache data.
237
+ is_initialized = self.has_variable("cache", "cached_key")
238
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
239
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
240
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
241
+
242
+ if is_initialized:
243
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
244
+ # update key, value caches with our new 1d spatial slices
245
+ cur_index = cache_index.value
246
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
247
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
248
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
249
+ cached_key.value = key
250
+ cached_value.value = value
251
+ num_updated_cache_vectors = query.shape[1]
252
+ cache_index.value = cache_index.value + num_updated_cache_vectors
253
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
254
+ pad_mask = jnp.broadcast_to(
255
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
256
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
257
+ )
258
+ attention_mask = combine_masks(pad_mask, attention_mask)
259
+ return key, value, attention_mask
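The `_concatenate_to_cache` helper above is the core of fast autoregressive decoding. The following standalone sketch (illustrative only, with made-up shapes, not part of the model code) shows the same `lax.dynamic_update_slice` pattern in isolation:

```python
# Sketch: a pre-allocated key buffer of shape (batch, max_length, num_heads, head_dim)
# is written in place at the current decoding index; positions beyond the filled
# prefix are masked out so attention never looks at the zero placeholders.
import jax.numpy as jnp
from jax import lax

batch, max_length, num_heads, head_dim = 1, 8, 2, 4
cached_key = jnp.zeros((batch, max_length, num_heads, head_dim))
new_key = jnp.ones((batch, 1, num_heads, head_dim))  # projection of the current token
cur_index = 3  # number of tokens already cached

# write the new slice at position `cur_index` along the length axis
cached_key = lax.dynamic_update_slice(cached_key, new_key, (0, cur_index, 0, 0))

# keys at positions >= cur_index + 1 are still zeros and must not be attended to
pad_mask = jnp.arange(max_length) < cur_index + 1
print(cached_key[0, :, 0, 0])  # [0. 0. 0. 1. 0. 0. 0. 0.]
print(pad_mask)                # [ True  True  True  True False False False False]
```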
260
+
261
+ def __call__(
262
+ self,
263
+ hidden_states,
264
+ attention_mask,
265
+ layer_head_mask,
266
+ key_value_states: Optional[jnp.ndarray] = None,
267
+ init_cache: bool = False,
268
+ deterministic=True,
269
+ output_attentions: bool = False,
270
+ ):
271
+ # if key_value_states are provided this layer is used as a cross-attention layer
272
+ # for the decoder
273
+ is_cross_attention = key_value_states is not None
274
+ batch_size = hidden_states.shape[0]
275
+
276
+ # get query proj
277
+ query_states = self.query(hidden_states)
278
+ # get key, value proj
279
+ if is_cross_attention:
280
+ # cross_attentions
281
+ key_states = self.key(key_value_states)
282
+ value_states = self.value(key_value_states)
283
+ else:
284
+ # self_attention
285
+ key_states = self.key(hidden_states)
286
+ value_states = self.value(hidden_states)
287
+
288
+ query_states = self._split_heads(query_states)
289
+ key_states = self._split_heads(key_states)
290
+ value_states = self._split_heads(value_states)
291
+
292
+ # handle cache prepare causal attention mask
293
+ if self.causal:
294
+ query_length, key_length = query_states.shape[1], key_states.shape[1]
295
+ if self.has_variable("cache", "cached_key"):
296
+ mask_shift = self.variables["cache"]["cache_index"]
297
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
298
+ causal_mask = lax.dynamic_slice(
299
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
300
+ )
301
+ else:
302
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
303
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
304
+
305
+ # combine masks if needed
306
+ if attention_mask is not None and self.causal:
307
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
308
+ attention_mask = combine_masks(attention_mask, causal_mask)
309
+ elif self.causal:
310
+ attention_mask = causal_mask
311
+ elif attention_mask is not None:
312
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
313
+
314
+ # During fast autoregressive decoding, we feed one position at a time,
315
+ # and cache the keys and values step by step.
316
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
317
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
318
+ key_states, value_states, query_states, attention_mask
319
+ )
320
+
321
+ # Convert the boolean attention mask to an attention bias.
322
+ if attention_mask is not None:
323
+ # attention mask in the form of attention bias
324
+ attention_bias = lax.select(
325
+ attention_mask > 0,
326
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
327
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
328
+ )
329
+ else:
330
+ attention_bias = None
331
+
332
+ dropout_rng = None
333
+ if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
334
+ dropout_rng = self.make_rng("dropout")
335
+
336
+ attn_weights = dot_product_attention_weights(
337
+ query_states,
338
+ key_states,
339
+ bias=attention_bias,
340
+ dropout_rng=dropout_rng,
341
+ dropout_rate=self.config.attention_probs_dropout_prob,
342
+ broadcast_dropout=True,
343
+ deterministic=deterministic,
344
+ dtype=self.dtype,
345
+ precision=None,
346
+ )
347
+
348
+ # Mask heads if we want to
349
+ if layer_head_mask is not None:
350
+ attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask)
351
+
352
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
353
+ attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
354
+
355
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
356
+ return outputs
357
+
358
+
359
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfOutput with Bert->Electra
360
+ class FlaxElectraSelfOutput(nn.Module):
361
+ config: ElectraConfig
362
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
363
+
364
+ def setup(self):
365
+ self.dense = nn.Dense(
366
+ self.config.hidden_size,
367
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
368
+ dtype=self.dtype,
369
+ )
370
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
371
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
372
+
373
+ def __call__(self, hidden_states, input_tensor, deterministic: bool = True):
374
+ hidden_states = self.dense(hidden_states)
375
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
376
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
377
+ return hidden_states
378
+
379
+
380
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertAttention with Bert->Electra
381
+ class FlaxElectraAttention(nn.Module):
382
+ config: ElectraConfig
383
+ causal: bool = False
384
+ dtype: jnp.dtype = jnp.float32
385
+
386
+ def setup(self):
387
+ self.self = FlaxElectraSelfAttention(self.config, causal=self.causal, dtype=self.dtype)
388
+ self.output = FlaxElectraSelfOutput(self.config, dtype=self.dtype)
389
+
390
+ def __call__(
391
+ self,
392
+ hidden_states,
393
+ attention_mask,
394
+ layer_head_mask,
395
+ key_value_states=None,
396
+ init_cache=False,
397
+ deterministic=True,
398
+ output_attentions: bool = False,
399
+ ):
400
+ # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length)
401
+ # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable
402
+ # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length)
403
+ attn_outputs = self.self(
404
+ hidden_states,
405
+ attention_mask,
406
+ layer_head_mask=layer_head_mask,
407
+ key_value_states=key_value_states,
408
+ init_cache=init_cache,
409
+ deterministic=deterministic,
410
+ output_attentions=output_attentions,
411
+ )
412
+ attn_output = attn_outputs[0]
413
+ hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic)
414
+
415
+ outputs = (hidden_states,)
416
+
417
+ if output_attentions:
418
+ outputs += (attn_outputs[1],)
419
+
420
+ return outputs
421
+
422
+
423
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertIntermediate with Bert->Electra
424
+ class FlaxElectraIntermediate(nn.Module):
425
+ config: ElectraConfig
426
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
427
+
428
+ def setup(self):
429
+ self.dense = nn.Dense(
430
+ self.config.intermediate_size,
431
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
432
+ dtype=self.dtype,
433
+ )
434
+ self.activation = ACT2FN[self.config.hidden_act]
435
+
436
+ def __call__(self, hidden_states):
437
+ hidden_states = self.dense(hidden_states)
438
+ hidden_states = self.activation(hidden_states)
439
+ return hidden_states
440
+
441
+
442
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOutput with Bert->Electra
443
+ class FlaxElectraOutput(nn.Module):
444
+ config: ElectraConfig
445
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
446
+
447
+ def setup(self):
448
+ self.dense = nn.Dense(
449
+ self.config.hidden_size,
450
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
451
+ dtype=self.dtype,
452
+ )
453
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
454
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
455
+
456
+ def __call__(self, hidden_states, attention_output, deterministic: bool = True):
457
+ hidden_states = self.dense(hidden_states)
458
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
459
+ hidden_states = self.LayerNorm(hidden_states + attention_output)
460
+ return hidden_states
461
+
462
+
463
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayer with Bert->Electra
464
+ class FlaxElectraLayer(nn.Module):
465
+ config: ElectraConfig
466
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
467
+
468
+ def setup(self):
469
+ self.attention = FlaxElectraAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype)
470
+ self.intermediate = FlaxElectraIntermediate(self.config, dtype=self.dtype)
471
+ self.output = FlaxElectraOutput(self.config, dtype=self.dtype)
472
+ if self.config.add_cross_attention:
473
+ self.crossattention = FlaxElectraAttention(self.config, causal=False, dtype=self.dtype)
474
+
475
+ def __call__(
476
+ self,
477
+ hidden_states,
478
+ attention_mask,
479
+ layer_head_mask,
480
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
481
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
482
+ init_cache: bool = False,
483
+ deterministic: bool = True,
484
+ output_attentions: bool = False,
485
+ ):
486
+ # Self Attention
487
+ attention_outputs = self.attention(
488
+ hidden_states,
489
+ attention_mask,
490
+ layer_head_mask=layer_head_mask,
491
+ init_cache=init_cache,
492
+ deterministic=deterministic,
493
+ output_attentions=output_attentions,
494
+ )
495
+ attention_output = attention_outputs[0]
496
+
497
+ # Cross-Attention Block
498
+ if encoder_hidden_states is not None:
499
+ cross_attention_outputs = self.crossattention(
500
+ attention_output,
501
+ attention_mask=encoder_attention_mask,
502
+ layer_head_mask=layer_head_mask,
503
+ key_value_states=encoder_hidden_states,
504
+ deterministic=deterministic,
505
+ output_attentions=output_attentions,
506
+ )
507
+ attention_output = cross_attention_outputs[0]
508
+
509
+ hidden_states = self.intermediate(attention_output)
510
+ hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic)
511
+
512
+ outputs = (hidden_states,)
513
+
514
+ if output_attentions:
515
+ outputs += (attention_outputs[1],)
516
+ if encoder_hidden_states is not None:
517
+ outputs += (cross_attention_outputs[1],)
518
+ return outputs
519
+
520
+
521
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayerCollection with Bert->Electra
522
+ class FlaxElectraLayerCollection(nn.Module):
523
+ config: ElectraConfig
524
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
525
+ gradient_checkpointing: bool = False
526
+
527
+ def setup(self):
528
+ if self.gradient_checkpointing:
529
+ FlaxElectraCheckpointLayer = remat(FlaxElectraLayer, static_argnums=(5, 6, 7))
530
+ self.layers = [
531
+ FlaxElectraCheckpointLayer(self.config, name=str(i), dtype=self.dtype)
532
+ for i in range(self.config.num_hidden_layers)
533
+ ]
534
+ else:
535
+ self.layers = [
536
+ FlaxElectraLayer(self.config, name=str(i), dtype=self.dtype)
537
+ for i in range(self.config.num_hidden_layers)
538
+ ]
539
+
540
+ def __call__(
541
+ self,
542
+ hidden_states,
543
+ attention_mask,
544
+ head_mask,
545
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
546
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
547
+ init_cache: bool = False,
548
+ deterministic: bool = True,
549
+ output_attentions: bool = False,
550
+ output_hidden_states: bool = False,
551
+ return_dict: bool = True,
552
+ ):
553
+ all_attentions = () if output_attentions else None
554
+ all_hidden_states = () if output_hidden_states else None
555
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
556
+
557
+ # Check if head_mask has a correct number of layers specified if desired
558
+ if head_mask is not None:
559
+ if head_mask.shape[0] != (len(self.layers)):
560
+ raise ValueError(
561
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for "
562
+ f" {head_mask.shape[0]}."
563
+ )
564
+
565
+ for i, layer in enumerate(self.layers):
566
+ if output_hidden_states:
567
+ all_hidden_states += (hidden_states,)
568
+
569
+ layer_outputs = layer(
570
+ hidden_states,
571
+ attention_mask,
572
+ head_mask[i] if head_mask is not None else None,
573
+ encoder_hidden_states,
574
+ encoder_attention_mask,
575
+ init_cache,
576
+ deterministic,
577
+ output_attentions,
578
+ )
579
+
580
+ hidden_states = layer_outputs[0]
581
+
582
+ if output_attentions:
583
+ all_attentions += (layer_outputs[1],)
584
+
585
+ if encoder_hidden_states is not None:
586
+ all_cross_attentions += (layer_outputs[2],)
587
+
588
+ if output_hidden_states:
589
+ all_hidden_states += (hidden_states,)
590
+
591
+ outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions)
592
+
593
+ if not return_dict:
594
+ return tuple(v for v in outputs if v is not None)
595
+
596
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
597
+ last_hidden_state=hidden_states,
598
+ hidden_states=all_hidden_states,
599
+ attentions=all_attentions,
600
+ cross_attentions=all_cross_attentions,
601
+ )
602
+
603
+
604
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEncoder with Bert->Electra
605
+ class FlaxElectraEncoder(nn.Module):
606
+ config: ElectraConfig
607
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
608
+ gradient_checkpointing: bool = False
609
+
610
+ def setup(self):
611
+ self.layer = FlaxElectraLayerCollection(
612
+ self.config,
613
+ dtype=self.dtype,
614
+ gradient_checkpointing=self.gradient_checkpointing,
615
+ )
616
+
617
+ def __call__(
618
+ self,
619
+ hidden_states,
620
+ attention_mask,
621
+ head_mask,
622
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
623
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
624
+ init_cache: bool = False,
625
+ deterministic: bool = True,
626
+ output_attentions: bool = False,
627
+ output_hidden_states: bool = False,
628
+ return_dict: bool = True,
629
+ ):
630
+ return self.layer(
631
+ hidden_states,
632
+ attention_mask,
633
+ head_mask=head_mask,
634
+ encoder_hidden_states=encoder_hidden_states,
635
+ encoder_attention_mask=encoder_attention_mask,
636
+ init_cache=init_cache,
637
+ deterministic=deterministic,
638
+ output_attentions=output_attentions,
639
+ output_hidden_states=output_hidden_states,
640
+ return_dict=return_dict,
641
+ )
642
+
643
+
644
+ class FlaxElectraGeneratorPredictions(nn.Module):
645
+ config: ElectraConfig
646
+ dtype: jnp.dtype = jnp.float32
647
+
648
+ def setup(self):
649
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
650
+ self.dense = nn.Dense(self.config.embedding_size, dtype=self.dtype)
651
+
652
+ def __call__(self, hidden_states):
653
+ hidden_states = self.dense(hidden_states)
654
+ hidden_states = ACT2FN[self.config.hidden_act](hidden_states)
655
+ hidden_states = self.LayerNorm(hidden_states)
656
+ return hidden_states
657
+
658
+
659
+ class FlaxElectraDiscriminatorPredictions(nn.Module):
660
+ """Prediction module for the discriminator, made up of two dense layers."""
661
+
662
+ config: ElectraConfig
663
+ dtype: jnp.dtype = jnp.float32
664
+
665
+ def setup(self):
666
+ self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype)
667
+ self.dense_prediction = nn.Dense(1, dtype=self.dtype)
668
+
669
+ def __call__(self, hidden_states):
670
+ hidden_states = self.dense(hidden_states)
671
+ hidden_states = ACT2FN[self.config.hidden_act](hidden_states)
672
+ hidden_states = self.dense_prediction(hidden_states).squeeze(-1)
673
+ return hidden_states
674
+
675
+
676
+ class FlaxElectraPreTrainedModel(FlaxPreTrainedModel):
677
+ """
678
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
679
+ models.
680
+ """
681
+
682
+ config_class = ElectraConfig
683
+ base_model_prefix = "electra"
684
+ module_class: nn.Module = None
685
+
686
+ def __init__(
687
+ self,
688
+ config: ElectraConfig,
689
+ input_shape: Tuple = (1, 1),
690
+ seed: int = 0,
691
+ dtype: jnp.dtype = jnp.float32,
692
+ _do_init: bool = True,
693
+ gradient_checkpointing: bool = False,
694
+ **kwargs,
695
+ ):
696
+ module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs)
697
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
698
+
699
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.enable_gradient_checkpointing
700
+ def enable_gradient_checkpointing(self):
701
+ self._module = self.module_class(
702
+ config=self.config,
703
+ dtype=self.dtype,
704
+ gradient_checkpointing=True,
705
+ )
706
+
707
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.init_weights
708
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
709
+ # init input tensors
710
+ input_ids = jnp.zeros(input_shape, dtype="i4")
711
+ token_type_ids = jnp.zeros_like(input_ids)
712
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
713
+ attention_mask = jnp.ones_like(input_ids)
714
+ head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
715
+
716
+ params_rng, dropout_rng = jax.random.split(rng)
717
+ rngs = {"params": params_rng, "dropout": dropout_rng}
718
+
719
+ if self.config.add_cross_attention:
720
+ encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,))
721
+ encoder_attention_mask = attention_mask
722
+ module_init_outputs = self.module.init(
723
+ rngs,
724
+ input_ids,
725
+ attention_mask,
726
+ token_type_ids,
727
+ position_ids,
728
+ head_mask,
729
+ encoder_hidden_states,
730
+ encoder_attention_mask,
731
+ return_dict=False,
732
+ )
733
+ else:
734
+ module_init_outputs = self.module.init(
735
+ rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False
736
+ )
737
+
738
+ random_params = module_init_outputs["params"]
739
+
740
+ if params is not None:
741
+ random_params = flatten_dict(unfreeze(random_params))
742
+ params = flatten_dict(unfreeze(params))
743
+ for missing_key in self._missing_keys:
744
+ params[missing_key] = random_params[missing_key]
745
+ self._missing_keys = set()
746
+ return freeze(unflatten_dict(params))
747
+ else:
748
+ return random_params
749
+
750
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderPreTrainedModel.init_cache
751
+ def init_cache(self, batch_size, max_length):
752
+ r"""
753
+ Args:
754
+ batch_size (`int`):
755
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
756
+ max_length (`int`):
757
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
758
+ cache.
759
+ """
760
+ # init input variables to retrieve cache
761
+ input_ids = jnp.ones((batch_size, max_length), dtype="i4")
762
+ attention_mask = jnp.ones_like(input_ids, dtype="i4")
763
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
764
+
765
+ init_variables = self.module.init(
766
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
767
+ )
768
+ return unfreeze(init_variables["cache"])
769
+
770
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
771
+ def __call__(
772
+ self,
773
+ input_ids,
774
+ attention_mask=None,
775
+ token_type_ids=None,
776
+ position_ids=None,
777
+ head_mask=None,
778
+ encoder_hidden_states=None,
779
+ encoder_attention_mask=None,
780
+ params: dict = None,
781
+ dropout_rng: jax.random.PRNGKey = None,
782
+ train: bool = False,
783
+ output_attentions: Optional[bool] = None,
784
+ output_hidden_states: Optional[bool] = None,
785
+ return_dict: Optional[bool] = None,
786
+ past_key_values: dict = None,
787
+ ):
788
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
789
+ output_hidden_states = (
790
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
791
+ )
792
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
793
+
794
+ # init input tensors if not passed
795
+ if token_type_ids is None:
796
+ token_type_ids = jnp.ones_like(input_ids)
797
+
798
+ if position_ids is None:
799
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
800
+
801
+ if attention_mask is None:
802
+ attention_mask = jnp.ones_like(input_ids)
803
+
804
+ if head_mask is None:
805
+ head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
806
+
807
+ # Handle any PRNG if needed
808
+ rngs = {}
809
+ if dropout_rng is not None:
810
+ rngs["dropout"] = dropout_rng
811
+
812
+ inputs = {"params": params or self.params}
813
+
814
+ if self.config.add_cross_attention:
815
+ # if past_key_values are passed, the cache is already initialized; a private flag `init_cache` has to be
816
+ # passed down to ensure the cache is used. The cache must also be marked as mutable so that it can be
817
+ # changed by the FlaxElectraAttention module
818
+ if past_key_values:
819
+ inputs["cache"] = past_key_values
820
+ mutable = ["cache"]
821
+ else:
822
+ mutable = False
823
+
824
+ outputs = self.module.apply(
825
+ inputs,
826
+ jnp.array(input_ids, dtype="i4"),
827
+ jnp.array(attention_mask, dtype="i4"),
828
+ token_type_ids=jnp.array(token_type_ids, dtype="i4"),
829
+ position_ids=jnp.array(position_ids, dtype="i4"),
830
+ head_mask=jnp.array(head_mask, dtype="i4"),
831
+ encoder_hidden_states=encoder_hidden_states,
832
+ encoder_attention_mask=encoder_attention_mask,
833
+ deterministic=not train,
834
+ output_attentions=output_attentions,
835
+ output_hidden_states=output_hidden_states,
836
+ return_dict=return_dict,
837
+ rngs=rngs,
838
+ mutable=mutable,
839
+ )
840
+
841
+ # add updated cache to model output
842
+ if past_key_values is not None and return_dict:
843
+ outputs, past_key_values = outputs
844
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
845
+ return outputs
846
+ elif past_key_values is not None and not return_dict:
847
+ outputs, past_key_values = outputs
848
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
849
+
850
+ else:
851
+ outputs = self.module.apply(
852
+ inputs,
853
+ jnp.array(input_ids, dtype="i4"),
854
+ jnp.array(attention_mask, dtype="i4"),
855
+ token_type_ids=jnp.array(token_type_ids, dtype="i4"),
856
+ position_ids=jnp.array(position_ids, dtype="i4"),
857
+ head_mask=jnp.array(head_mask, dtype="i4"),
858
+ deterministic=not train,
859
+ output_attentions=output_attentions,
860
+ output_hidden_states=output_hidden_states,
861
+ return_dict=return_dict,
862
+ rngs=rngs,
863
+ )
864
+
865
+ return outputs
866
+
867
+
868
+ class FlaxElectraModule(nn.Module):
869
+ config: ElectraConfig
870
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
871
+ gradient_checkpointing: bool = False
872
+
873
+ def setup(self):
874
+ self.embeddings = FlaxElectraEmbeddings(self.config, dtype=self.dtype)
875
+ if self.config.embedding_size != self.config.hidden_size:
876
+ self.embeddings_project = nn.Dense(self.config.hidden_size, dtype=self.dtype)
877
+ self.encoder = FlaxElectraEncoder(
878
+ self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
879
+ )
880
+
881
+ def __call__(
882
+ self,
883
+ input_ids,
884
+ attention_mask,
885
+ token_type_ids,
886
+ position_ids,
887
+ head_mask: Optional[np.ndarray] = None,
888
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
889
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
890
+ init_cache: bool = False,
891
+ deterministic: bool = True,
892
+ output_attentions: bool = False,
893
+ output_hidden_states: bool = False,
894
+ return_dict: bool = True,
895
+ ):
896
+ embeddings = self.embeddings(
897
+ input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic
898
+ )
899
+ if hasattr(self, "embeddings_project"):
900
+ embeddings = self.embeddings_project(embeddings)
901
+
902
+ return self.encoder(
903
+ embeddings,
904
+ attention_mask,
905
+ head_mask=head_mask,
906
+ deterministic=deterministic,
907
+ encoder_hidden_states=encoder_hidden_states,
908
+ encoder_attention_mask=encoder_attention_mask,
909
+ init_cache=init_cache,
910
+ output_attentions=output_attentions,
911
+ output_hidden_states=output_hidden_states,
912
+ return_dict=return_dict,
913
+ )
914
+
915
+
916
+ @add_start_docstrings(
917
+ "The bare Electra Model transformer outputting raw hidden-states without any specific head on top.",
918
+ ELECTRA_START_DOCSTRING,
919
+ )
920
+ class FlaxElectraModel(FlaxElectraPreTrainedModel):
921
+ module_class = FlaxElectraModule
922
+
923
+
924
+ append_call_sample_docstring(FlaxElectraModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutput, _CONFIG_FOR_DOC)
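For reference, a minimal usage sketch of the bare model; it mirrors the sample docstring attached above and uses the same discriminator checkpoint as `_CHECKPOINT_FOR_DOC`:

```python
# Minimal sketch: encode a sentence with the bare Flax Electra encoder.
from transformers import AutoTokenizer, FlaxElectraModel

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
model = FlaxElectraModel.from_pretrained("google/electra-small-discriminator")

inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
outputs = model(**inputs)

print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)
```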
925
+
926
+
927
+ class FlaxElectraTiedDense(nn.Module):
928
+ embedding_size: int
929
+ dtype: jnp.dtype = jnp.float32
930
+ precision = None
931
+ bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
932
+
933
+ def setup(self):
934
+ self.bias = self.param("bias", self.bias_init, (self.embedding_size,))
935
+
936
+ def __call__(self, x, kernel):
937
+ x = jnp.asarray(x, self.dtype)
938
+ kernel = jnp.asarray(kernel, self.dtype)
939
+ y = lax.dot_general(
940
+ x,
941
+ kernel,
942
+ (((x.ndim - 1,), (0,)), ((), ())),
943
+ precision=self.precision,
944
+ )
945
+ bias = jnp.asarray(self.bias, self.dtype)
946
+ return y + bias
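`FlaxElectraTiedDense` exists so the LM head can reuse the word-embedding matrix as its output kernel (weight tying). A small NumPy sketch of the contraction it performs, with illustrative shapes:

```python
# The tied head computes hidden @ embedding.T + bias, i.e. the transposed
# word-embedding matrix acts as the output projection.
import numpy as np

hidden_size, vocab_size, seq_len = 4, 10, 3
hidden = np.random.randn(1, seq_len, hidden_size)
embedding = np.random.randn(vocab_size, hidden_size)  # the word_embeddings "embedding" param
bias = np.zeros(vocab_size)

# equivalent to FlaxElectraTiedDense(vocab_size)(hidden, embedding.T)
logits = hidden @ embedding.T + bias
print(logits.shape)  # (1, 3, 10)
```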
947
+
948
+
949
+ class FlaxElectraForMaskedLMModule(nn.Module):
950
+ config: ElectraConfig
951
+ dtype: jnp.dtype = jnp.float32
952
+ gradient_checkpointing: bool = False
953
+
954
+ def setup(self):
955
+ self.electra = FlaxElectraModule(
956
+ config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
957
+ )
958
+ self.generator_predictions = FlaxElectraGeneratorPredictions(config=self.config, dtype=self.dtype)
959
+ if self.config.tie_word_embeddings:
960
+ self.generator_lm_head = FlaxElectraTiedDense(self.config.vocab_size, dtype=self.dtype)
961
+ else:
962
+ self.generator_lm_head = nn.Dense(self.config.vocab_size, dtype=self.dtype)
963
+
964
+ def __call__(
965
+ self,
966
+ input_ids,
967
+ attention_mask=None,
968
+ token_type_ids=None,
969
+ position_ids=None,
970
+ head_mask=None,
971
+ deterministic: bool = True,
972
+ output_attentions: bool = False,
973
+ output_hidden_states: bool = False,
974
+ return_dict: bool = True,
975
+ ):
976
+ outputs = self.electra(
977
+ input_ids,
978
+ attention_mask,
979
+ token_type_ids,
980
+ position_ids,
981
+ head_mask,
982
+ deterministic=deterministic,
983
+ output_attentions=output_attentions,
984
+ output_hidden_states=output_hidden_states,
985
+ return_dict=return_dict,
986
+ )
987
+ hidden_states = outputs[0]
988
+ prediction_scores = self.generator_predictions(hidden_states)
989
+
990
+ if self.config.tie_word_embeddings:
991
+ shared_embedding = self.electra.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
992
+ prediction_scores = self.generator_lm_head(prediction_scores, shared_embedding.T)
993
+ else:
994
+ prediction_scores = self.generator_lm_head(prediction_scores)
995
+
996
+ if not return_dict:
997
+ return (prediction_scores,) + outputs[1:]
998
+
999
+ return FlaxMaskedLMOutput(
1000
+ logits=prediction_scores,
1001
+ hidden_states=outputs.hidden_states,
1002
+ attentions=outputs.attentions,
1003
+ )
1004
+
1005
+
1006
+ @add_start_docstrings("""Electra Model with a `language modeling` head on top.""", ELECTRA_START_DOCSTRING)
1007
+ class FlaxElectraForMaskedLM(FlaxElectraPreTrainedModel):
1008
+ module_class = FlaxElectraForMaskedLMModule
1009
+
1010
+
1011
+ append_call_sample_docstring(FlaxElectraForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC)
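A hedged usage sketch for the masked-LM head; the `google/electra-small-generator` checkpoint is assumed here, since the generator (not the discriminator) is the natural fit for this head:

```python
# Fill a [MASK] token with the generator head.
import jax.numpy as jnp
from transformers import AutoTokenizer, FlaxElectraForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-generator")
model = FlaxElectraForMaskedLM.from_pretrained("google/electra-small-generator")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="np")
logits = model(**inputs).logits  # (batch_size, sequence_length, vocab_size)

mask_position = int(jnp.argmax(inputs["input_ids"][0] == tokenizer.mask_token_id))
predicted_id = int(jnp.argmax(logits[0, mask_position]))
print(tokenizer.decode([predicted_id]))
```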
1012
+
1013
+
1014
+ class FlaxElectraForPreTrainingModule(nn.Module):
1015
+ config: ElectraConfig
1016
+ dtype: jnp.dtype = jnp.float32
1017
+ gradient_checkpointing: bool = False
1018
+
1019
+ def setup(self):
1020
+ self.electra = FlaxElectraModule(
1021
+ config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
1022
+ )
1023
+ self.discriminator_predictions = FlaxElectraDiscriminatorPredictions(config=self.config, dtype=self.dtype)
1024
+
1025
+ def __call__(
1026
+ self,
1027
+ input_ids,
1028
+ attention_mask=None,
1029
+ token_type_ids=None,
1030
+ position_ids=None,
1031
+ head_mask=None,
1032
+ deterministic: bool = True,
1033
+ output_attentions: bool = False,
1034
+ output_hidden_states: bool = False,
1035
+ return_dict: bool = True,
1036
+ ):
1037
+ # Model
1038
+ outputs = self.electra(
1039
+ input_ids,
1040
+ attention_mask,
1041
+ token_type_ids,
1042
+ position_ids,
1043
+ head_mask,
1044
+ deterministic=deterministic,
1045
+ output_attentions=output_attentions,
1046
+ output_hidden_states=output_hidden_states,
1047
+ return_dict=return_dict,
1048
+ )
1049
+ hidden_states = outputs[0]
1050
+
1051
+ logits = self.discriminator_predictions(hidden_states)
1052
+
1053
+ if not return_dict:
1054
+ return (logits,) + outputs[1:]
1055
+
1056
+ return FlaxElectraForPreTrainingOutput(
1057
+ logits=logits,
1058
+ hidden_states=outputs.hidden_states,
1059
+ attentions=outputs.attentions,
1060
+ )
1061
+
1062
+
1063
+ @add_start_docstrings(
1064
+ """
1065
+ Electra model with a binary classification head on top as used during pretraining for identifying generated tokens.
1066
+
1067
+ It is recommended to load the discriminator checkpoint into that model.
1068
+ """,
1069
+ ELECTRA_START_DOCSTRING,
1070
+ )
1071
+ class FlaxElectraForPreTraining(FlaxElectraPreTrainedModel):
1072
+ module_class = FlaxElectraForPreTrainingModule
1073
+
1074
+
1075
+ FLAX_ELECTRA_FOR_PRETRAINING_DOCSTRING = """
1076
+ Returns:
1077
+
1078
+ Example:
1079
+
1080
+ ```python
1081
+ >>> from transformers import AutoTokenizer, FlaxElectraForPreTraining
1082
+
1083
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
1084
+ >>> model = FlaxElectraForPreTraining.from_pretrained("google/electra-small-discriminator")
1085
+
1086
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
1087
+ >>> outputs = model(**inputs)
1088
+
1089
+ >>> prediction_logits = outputs.logits
1090
+ ```
1091
+ """
1092
+
1093
+ overwrite_call_docstring(
1094
+ FlaxElectraForPreTraining,
1095
+ ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length") + FLAX_ELECTRA_FOR_PRETRAINING_DOCSTRING,
1096
+ )
1097
+ append_replace_return_docstrings(
1098
+ FlaxElectraForPreTraining, output_type=FlaxElectraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC
1099
+ )
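Beyond the docstring example above, the discriminator logits have a direct interpretation: one logit per token, with a positive value meaning the token is predicted to have been replaced. A short sketch of turning them into binary predictions:

```python
# Threshold the per-token logits at 0 to get replaced/original predictions.
from transformers import AutoTokenizer, FlaxElectraForPreTraining

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
model = FlaxElectraForPreTraining.from_pretrained("google/electra-small-discriminator")

inputs = tokenizer("The quick brown fox jumps over the lazy dog", return_tensors="np")
logits = model(**inputs).logits  # (batch_size, sequence_length)

tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0].tolist())
for token, logit in zip(tokens, logits[0]):
    print(token, "replaced" if logit > 0 else "original")
```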
1100
+
1101
+
1102
+ class FlaxElectraForTokenClassificationModule(nn.Module):
1103
+ config: ElectraConfig
1104
+ dtype: jnp.dtype = jnp.float32
1105
+ gradient_checkpointing: bool = False
1106
+
1107
+ def setup(self):
1108
+ self.electra = FlaxElectraModule(
1109
+ config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
1110
+ )
1111
+ classifier_dropout = (
1112
+ self.config.classifier_dropout
1113
+ if self.config.classifier_dropout is not None
1114
+ else self.config.hidden_dropout_prob
1115
+ )
1116
+ self.dropout = nn.Dropout(classifier_dropout)
1117
+ self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
1118
+
1119
+ def __call__(
1120
+ self,
1121
+ input_ids,
1122
+ attention_mask=None,
1123
+ token_type_ids=None,
1124
+ position_ids=None,
1125
+ head_mask=None,
1126
+ deterministic: bool = True,
1127
+ output_attentions: bool = False,
1128
+ output_hidden_states: bool = False,
1129
+ return_dict: bool = True,
1130
+ ):
1131
+ # Model
1132
+ outputs = self.electra(
1133
+ input_ids,
1134
+ attention_mask,
1135
+ token_type_ids,
1136
+ position_ids,
1137
+ head_mask,
1138
+ deterministic=deterministic,
1139
+ output_attentions=output_attentions,
1140
+ output_hidden_states=output_hidden_states,
1141
+ return_dict=return_dict,
1142
+ )
1143
+ hidden_states = outputs[0]
1144
+
1145
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
1146
+ logits = self.classifier(hidden_states)
1147
+
1148
+ if not return_dict:
1149
+ return (logits,) + outputs[1:]
1150
+
1151
+ return FlaxTokenClassifierOutput(
1152
+ logits=logits,
1153
+ hidden_states=outputs.hidden_states,
1154
+ attentions=outputs.attentions,
1155
+ )
1156
+
1157
+
1158
+ @add_start_docstrings(
1159
+ """
1160
+ Electra model with a token classification head on top.
1161
+
1162
+ Both the discriminator and generator may be loaded into this model.
1163
+ """,
1164
+ ELECTRA_START_DOCSTRING,
1165
+ )
1166
+ class FlaxElectraForTokenClassification(FlaxElectraPreTrainedModel):
1167
+ module_class = FlaxElectraForTokenClassificationModule
1168
+
1169
+
1170
+ append_call_sample_docstring(
1171
+ FlaxElectraForTokenClassification,
1172
+ _CHECKPOINT_FOR_DOC,
1173
+ FlaxTokenClassifierOutput,
1174
+ _CONFIG_FOR_DOC,
1175
+ )
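A hedged sketch for the token-classification head; the base discriminator checkpoint and `num_labels=5` are illustrative only, and the classification layer stays randomly initialized until fine-tuned:

```python
# Per-token label prediction: argmax over the label dimension.
import jax.numpy as jnp
from transformers import AutoTokenizer, FlaxElectraForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
model = FlaxElectraForTokenClassification.from_pretrained(
    "google/electra-small-discriminator", num_labels=5
)

inputs = tokenizer("HuggingFace is based in New York City", return_tensors="np")
logits = model(**inputs).logits  # (batch_size, sequence_length, num_labels)
predicted_labels = jnp.argmax(logits, axis=-1)
print(predicted_labels)
```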
1176
+
1177
+
1178
+ def identity(x, **kwargs):
1179
+ return x
1180
+
1181
+
1182
+ class FlaxElectraSequenceSummary(nn.Module):
1183
+ r"""
1184
+ Compute a single vector summary of a sequence hidden states.
1185
+
1186
+ Args:
1187
+ config ([`PretrainedConfig`]):
1188
+ The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
1189
+ config class of your model for the default values it uses):
1190
+
1191
+ - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction.
1192
+ - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes
1193
+ (otherwise to `config.hidden_size`).
1194
+ - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output;
1195
+ any other string or `None` adds no activation.
1196
+ - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.
1197
+ - **summary_last_dropout** (`float`) -- Optional dropout probability after the projection and activation.
1198
+ """
1199
+
1200
+ config: ElectraConfig
1201
+ dtype: jnp.dtype = jnp.float32
1202
+
1203
+ def setup(self):
1204
+ self.summary = identity
1205
+ if hasattr(self.config, "summary_use_proj") and self.config.summary_use_proj:
1206
+ if (
1207
+ hasattr(self.config, "summary_proj_to_labels")
1208
+ and self.config.summary_proj_to_labels
1209
+ and self.config.num_labels > 0
1210
+ ):
1211
+ num_classes = self.config.num_labels
1212
+ else:
1213
+ num_classes = self.config.hidden_size
1214
+ self.summary = nn.Dense(num_classes, dtype=self.dtype)
1215
+
1216
+ activation_string = getattr(self.config, "summary_activation", None)
1217
+ self.activation = ACT2FN[activation_string] if activation_string else lambda x: x # noqa F407
1218
+
1219
+ self.first_dropout = identity
1220
+ if hasattr(self.config, "summary_first_dropout") and self.config.summary_first_dropout > 0:
1221
+ self.first_dropout = nn.Dropout(self.config.summary_first_dropout)
1222
+
1223
+ self.last_dropout = identity
1224
+ if hasattr(self.config, "summary_last_dropout") and self.config.summary_last_dropout > 0:
1225
+ self.last_dropout = nn.Dropout(self.config.summary_last_dropout)
1226
+
1227
+ def __call__(self, hidden_states, cls_index=None, deterministic: bool = True):
1228
+ """
1229
+ Compute a single vector summary of a sequence hidden states.
1230
+
1231
+ Args:
1232
+ hidden_states (`jnp.ndarray` of shape `[batch_size, seq_len, hidden_size]`):
1233
+ The hidden states of the last layer.
1234
+ cls_index (`jnp.ndarray` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*):
1235
+ Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token.
1236
+
1237
+ Returns:
1238
+ `jnp.ndarray`: The summary of the sequence hidden states.
1239
+ """
1240
+ # NOTE: this always does the "first" type of summary (the first token of the sequence)
1241
+ output = hidden_states[:, 0]
1242
+ output = self.first_dropout(output, deterministic=deterministic)
1243
+ output = self.summary(output)
1244
+ output = self.activation(output)
1245
+ output = self.last_dropout(output, deterministic=deterministic)
1246
+ return output
1247
+
1248
+
1249
+ class FlaxElectraForMultipleChoiceModule(nn.Module):
1250
+ config: ElectraConfig
1251
+ dtype: jnp.dtype = jnp.float32
1252
+ gradient_checkpointing: bool = False
1253
+
1254
+ def setup(self):
1255
+ self.electra = FlaxElectraModule(
1256
+ config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
1257
+ )
1258
+ self.sequence_summary = FlaxElectraSequenceSummary(config=self.config, dtype=self.dtype)
1259
+ self.classifier = nn.Dense(1, dtype=self.dtype)
1260
+
1261
+ def __call__(
1262
+ self,
1263
+ input_ids,
1264
+ attention_mask=None,
1265
+ token_type_ids=None,
1266
+ position_ids=None,
1267
+ head_mask=None,
1268
+ deterministic: bool = True,
1269
+ output_attentions: bool = False,
1270
+ output_hidden_states: bool = False,
1271
+ return_dict: bool = True,
1272
+ ):
1273
+ num_choices = input_ids.shape[1]
1274
+ input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
1275
+ attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
1276
+ token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
1277
+ position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None
1278
+
1279
+ # Model
1280
+ outputs = self.electra(
1281
+ input_ids,
1282
+ attention_mask,
1283
+ token_type_ids,
1284
+ position_ids,
1285
+ head_mask,
1286
+ deterministic=deterministic,
1287
+ output_attentions=output_attentions,
1288
+ output_hidden_states=output_hidden_states,
1289
+ return_dict=return_dict,
1290
+ )
1291
+ hidden_states = outputs[0]
1292
+ pooled_output = self.sequence_summary(hidden_states, deterministic=deterministic)
1293
+ logits = self.classifier(pooled_output)
1294
+
1295
+ reshaped_logits = logits.reshape(-1, num_choices)
1296
+
1297
+ if not return_dict:
1298
+ return (reshaped_logits,) + outputs[1:]
1299
+
1300
+ return FlaxMultipleChoiceModelOutput(
1301
+ logits=reshaped_logits,
1302
+ hidden_states=outputs.hidden_states,
1303
+ attentions=outputs.attentions,
1304
+ )
1305
+
1306
+
1307
+ @add_start_docstrings(
1308
+ """
1309
+ ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1310
+ softmax) e.g. for RocStories/SWAG tasks.
1311
+ """,
1312
+ ELECTRA_START_DOCSTRING,
1313
+ )
1314
+ class FlaxElectraForMultipleChoice(FlaxElectraPreTrainedModel):
1315
+ module_class = FlaxElectraForMultipleChoiceModule
1316
+
1317
+
1318
+ # adapt docstring slightly for FlaxElectraForMultipleChoice
1319
+ overwrite_call_docstring(
1320
+ FlaxElectraForMultipleChoice, ELECTRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1321
+ )
1322
+ append_call_sample_docstring(
1323
+ FlaxElectraForMultipleChoice,
1324
+ _CHECKPOINT_FOR_DOC,
1325
+ FlaxMultipleChoiceModelOutput,
1326
+ _CONFIG_FOR_DOC,
1327
+ )
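The multiple-choice head expects inputs of shape `(batch_size, num_choices, sequence_length)` and flattens the first two axes internally (see the `reshape` calls above). A hedged input-construction sketch, with an illustrative checkpoint and toy choices:

```python
# Score two candidate continuations of a prompt.
import numpy as np
from transformers import AutoTokenizer, FlaxElectraForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
model = FlaxElectraForMultipleChoice.from_pretrained("google/electra-small-discriminator")

prompt = "The weather today is"
choices = ["sunny and warm.", "a prime number."]

encoding = tokenizer([prompt, prompt], choices, return_tensors="np", padding=True)
inputs = {k: np.expand_dims(v, 0) for k, v in encoding.items()}  # (1, 2, seq_len)

logits = model(**inputs).logits  # (batch_size, num_choices)
print(logits)
```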
1328
+
1329
+
1330
+ class FlaxElectraForQuestionAnsweringModule(nn.Module):
1331
+ config: ElectraConfig
1332
+ dtype: jnp.dtype = jnp.float32
1333
+ gradient_checkpointing: bool = False
1334
+
1335
+ def setup(self):
1336
+ self.electra = FlaxElectraModule(
1337
+ config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
1338
+ )
1339
+ self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
1340
+
1341
+ def __call__(
1342
+ self,
1343
+ input_ids,
1344
+ attention_mask=None,
1345
+ token_type_ids=None,
1346
+ position_ids=None,
1347
+ head_mask=None,
1348
+ deterministic: bool = True,
1349
+ output_attentions: bool = False,
1350
+ output_hidden_states: bool = False,
1351
+ return_dict: bool = True,
1352
+ ):
1353
+ # Model
1354
+ outputs = self.electra(
1355
+ input_ids,
1356
+ attention_mask,
1357
+ token_type_ids,
1358
+ position_ids,
1359
+ head_mask,
1360
+ deterministic=deterministic,
1361
+ output_attentions=output_attentions,
1362
+ output_hidden_states=output_hidden_states,
1363
+ return_dict=return_dict,
1364
+ )
1365
+ hidden_states = outputs[0]
1366
+ logits = self.qa_outputs(hidden_states)
1367
+ start_logits, end_logits = logits.split(self.config.num_labels, axis=-1)
1368
+ start_logits = start_logits.squeeze(-1)
1369
+ end_logits = end_logits.squeeze(-1)
1370
+
1371
+ if not return_dict:
1372
+ return (start_logits, end_logits) + outputs[1:]
1373
+
1374
+ return FlaxQuestionAnsweringModelOutput(
1375
+ start_logits=start_logits,
1376
+ end_logits=end_logits,
1377
+ hidden_states=outputs.hidden_states,
1378
+ attentions=outputs.attentions,
1379
+ )
1380
+
1381
+
1382
+ @add_start_docstrings(
1383
+ """
1384
+ ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
1385
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1386
+ """,
1387
+ ELECTRA_START_DOCSTRING,
1388
+ )
1389
+ class FlaxElectraForQuestionAnswering(FlaxElectraPreTrainedModel):
1390
+ module_class = FlaxElectraForQuestionAnsweringModule
1391
+
1392
+
1393
+ append_call_sample_docstring(
1394
+ FlaxElectraForQuestionAnswering,
1395
+ _CHECKPOINT_FOR_DOC,
1396
+ FlaxQuestionAnsweringModelOutput,
1397
+ _CONFIG_FOR_DOC,
1398
+ )
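A hedged sketch for the span-extraction head; the checkpoint name is illustrative (an Electra model fine-tuned on SQuAD would be needed for meaningful answers, since the base discriminator's `qa_outputs` layer is randomly initialized):

```python
# Decode the most likely answer span from the start/end logits.
import jax.numpy as jnp
from transformers import AutoTokenizer, FlaxElectraForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
model = FlaxElectraForQuestionAnswering.from_pretrained("google/electra-small-discriminator")

question = "Who proposed ELECTRA?"
context = "ELECTRA was proposed by Clark et al. in 2020."
inputs = tokenizer(question, context, return_tensors="np")

outputs = model(**inputs)
start = int(jnp.argmax(outputs.start_logits[0]))
end = int(jnp.argmax(outputs.end_logits[0]))
print(tokenizer.decode(inputs["input_ids"][0][start : end + 1].tolist()))
```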
1399
+
1400
+
1401
+ class FlaxElectraClassificationHead(nn.Module):
1402
+ """Head for sentence-level classification tasks."""
1403
+
1404
+ config: ElectraConfig
1405
+ dtype: jnp.dtype = jnp.float32
1406
+
1407
+ def setup(self):
1408
+ self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype)
1409
+ classifier_dropout = (
1410
+ self.config.classifier_dropout
1411
+ if self.config.classifier_dropout is not None
1412
+ else self.config.hidden_dropout_prob
1413
+ )
1414
+ self.dropout = nn.Dropout(classifier_dropout)
1415
+ self.out_proj = nn.Dense(self.config.num_labels, dtype=self.dtype)
1416
+
1417
+ def __call__(self, hidden_states, deterministic: bool = True):
1418
+ x = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
1419
+ x = self.dropout(x, deterministic=deterministic)
1420
+ x = self.dense(x)
1421
+ x = ACT2FN["gelu"](x) # although BERT uses tanh here, it seems Electra authors used gelu
1422
+ x = self.dropout(x, deterministic=deterministic)
1423
+ x = self.out_proj(x)
1424
+ return x
1425
+
1426
+
1427
+ class FlaxElectraForSequenceClassificationModule(nn.Module):
1428
+ config: ElectraConfig
1429
+ dtype: jnp.dtype = jnp.float32
1430
+ gradient_checkpointing: bool = False
1431
+
1432
+ def setup(self):
1433
+ self.electra = FlaxElectraModule(
1434
+ config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
1435
+ )
1436
+ self.classifier = FlaxElectraClassificationHead(config=self.config, dtype=self.dtype)
1437
+
1438
+ def __call__(
1439
+ self,
1440
+ input_ids,
1441
+ attention_mask=None,
1442
+ token_type_ids=None,
1443
+ position_ids=None,
1444
+ head_mask=None,
1445
+ deterministic: bool = True,
1446
+ output_attentions: bool = False,
1447
+ output_hidden_states: bool = False,
1448
+ return_dict: bool = True,
1449
+ ):
1450
+ # Model
1451
+ outputs = self.electra(
1452
+ input_ids,
1453
+ attention_mask,
1454
+ token_type_ids,
1455
+ position_ids,
1456
+ head_mask,
1457
+ deterministic=deterministic,
1458
+ output_attentions=output_attentions,
1459
+ output_hidden_states=output_hidden_states,
1460
+ return_dict=return_dict,
1461
+ )
1462
+ hidden_states = outputs[0]
1463
+ logits = self.classifier(hidden_states, deterministic=deterministic)
1464
+
1465
+ if not return_dict:
1466
+ return (logits,) + outputs[1:]
1467
+
1468
+ return FlaxSequenceClassifierOutput(
1469
+ logits=logits,
1470
+ hidden_states=outputs.hidden_states,
1471
+ attentions=outputs.attentions,
1472
+ )
1473
+
1474
+
1475
+ @add_start_docstrings(
1476
+ """
1477
+ Electra Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1478
+ pooled output) e.g. for GLUE tasks.
1479
+ """,
1480
+ ELECTRA_START_DOCSTRING,
1481
+ )
1482
+ class FlaxElectraForSequenceClassification(FlaxElectraPreTrainedModel):
1483
+ module_class = FlaxElectraForSequenceClassificationModule
1484
+
1485
+
1486
+ append_call_sample_docstring(
1487
+ FlaxElectraForSequenceClassification,
1488
+ _CHECKPOINT_FOR_DOC,
1489
+ FlaxSequenceClassifierOutput,
1490
+ _CONFIG_FOR_DOC,
1491
+ )
1492
+
1493
+
1494
+ class FlaxElectraForCausalLMModule(nn.Module):
1495
+ config: ElectraConfig
1496
+ dtype: jnp.dtype = jnp.float32
1497
+ gradient_checkpointing: bool = False
1498
+
1499
+ def setup(self):
1500
+ self.electra = FlaxElectraModule(
1501
+ config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
1502
+ )
1503
+ self.generator_predictions = FlaxElectraGeneratorPredictions(config=self.config, dtype=self.dtype)
1504
+ if self.config.tie_word_embeddings:
1505
+ self.generator_lm_head = FlaxElectraTiedDense(self.config.vocab_size, dtype=self.dtype)
1506
+ else:
1507
+ self.generator_lm_head = nn.Dense(self.config.vocab_size, dtype=self.dtype)
1508
+
1509
+ def __call__(
1510
+ self,
1511
+ input_ids,
1512
+ attention_mask: Optional[jnp.ndarray] = None,
1513
+ token_type_ids: Optional[jnp.ndarray] = None,
1514
+ position_ids: Optional[jnp.ndarray] = None,
1515
+ head_mask: Optional[jnp.ndarray] = None,
1516
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
1517
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1518
+ init_cache: bool = False,
1519
+ deterministic: bool = True,
1520
+ output_attentions: bool = False,
1521
+ output_hidden_states: bool = False,
1522
+ return_dict: bool = True,
1523
+ ):
1524
+ outputs = self.electra(
1525
+ input_ids,
1526
+ attention_mask,
1527
+ token_type_ids,
1528
+ position_ids,
1529
+ head_mask,
1530
+ encoder_hidden_states=encoder_hidden_states,
1531
+ encoder_attention_mask=encoder_attention_mask,
1532
+ init_cache=init_cache,
1533
+ deterministic=deterministic,
1534
+ output_attentions=output_attentions,
1535
+ output_hidden_states=output_hidden_states,
1536
+ return_dict=return_dict,
1537
+ )
1538
+ hidden_states = outputs[0]
1539
+ prediction_scores = self.generator_predictions(hidden_states)
1540
+
1541
+ if self.config.tie_word_embeddings:
1542
+ shared_embedding = self.electra.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
1543
+ prediction_scores = self.generator_lm_head(prediction_scores, shared_embedding.T)
1544
+ else:
1545
+ prediction_scores = self.generator_lm_head(prediction_scores)
1546
+
1547
+ if not return_dict:
1548
+ return (prediction_scores,) + outputs[1:]
1549
+
1550
+ return FlaxCausalLMOutputWithCrossAttentions(
1551
+ logits=prediction_scores,
1552
+ hidden_states=outputs.hidden_states,
1553
+ attentions=outputs.attentions,
1554
+ cross_attentions=outputs.cross_attentions,
1555
+ )
1556
+
1557
+
1558
+ @add_start_docstrings(
1559
+ """
1560
+ Electra Model with a language modeling head on top (a linear layer on top of the hidden-states output), e.g. for
1561
+ autoregressive tasks.
1562
+ """,
1563
+ ELECTRA_START_DOCSTRING,
1564
+ )
1565
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForCausalLM with Bert->Electra
1566
+ class FlaxElectraForCausalLM(FlaxElectraPreTrainedModel):
1567
+ module_class = FlaxElectraForCausalLMModule
1568
+
1569
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
1570
+ # initializing the cache
1571
+ batch_size, seq_length = input_ids.shape
1572
+
1573
+ past_key_values = self.init_cache(batch_size, max_length)
1574
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
1575
+ # But since the decoder uses a causal mask, those positions are masked anyway.
1576
+ # Thus, we can create a single static attention_mask here, which is more efficient for compilation
1577
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
1578
+ if attention_mask is not None:
1579
+ position_ids = attention_mask.cumsum(axis=-1) - 1
1580
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
1581
+ else:
1582
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
1583
+
1584
+ return {
1585
+ "past_key_values": past_key_values,
1586
+ "attention_mask": extended_attention_mask,
1587
+ "position_ids": position_ids,
1588
+ }
1589
+
1590
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
1591
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
1592
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
1593
+ return model_kwargs
1594
+
1595
+
1596
+ append_call_sample_docstring(
1597
+ FlaxElectraForCausalLM,
1598
+ _CHECKPOINT_FOR_DOC,
1599
+ FlaxCausalLMOutputWithCrossAttentions,
1600
+ _CONFIG_FOR_DOC,
1601
+ )
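All of the head classes above inherit `enable_gradient_checkpointing` from `FlaxElectraPreTrainedModel`; a short sketch of toggling it after loading (checkpoint and label count are illustrative):

```python
# Re-instantiate the module with remat-wrapped layers to trade compute for memory.
from transformers import FlaxElectraForSequenceClassification

model = FlaxElectraForSequenceClassification.from_pretrained(
    "google/electra-small-discriminator", num_labels=2
)
model.enable_gradient_checkpointing()  # defined on FlaxElectraPreTrainedModel
```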
llmeval-env/lib/python3.10/site-packages/transformers/models/electra/modeling_tf_electra.py ADDED
@@ -0,0 +1,1768 @@
1
+ # coding=utf-8
2
+ # Copyright 2019 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF Electra model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import math
21
+ import warnings
22
+ from dataclasses import dataclass
23
+ from typing import Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import tensorflow as tf
27
+
28
+ from ...activations_tf import get_tf_activation
29
+ from ...modeling_tf_outputs import (
30
+ TFBaseModelOutputWithPastAndCrossAttentions,
31
+ TFMaskedLMOutput,
32
+ TFMultipleChoiceModelOutput,
33
+ TFQuestionAnsweringModelOutput,
34
+ TFSequenceClassifierOutput,
35
+ TFTokenClassifierOutput,
36
+ )
37
+ from ...modeling_tf_utils import (
38
+ TFMaskedLanguageModelingLoss,
39
+ TFModelInputType,
40
+ TFMultipleChoiceLoss,
41
+ TFPreTrainedModel,
42
+ TFQuestionAnsweringLoss,
43
+ TFSequenceClassificationLoss,
44
+ TFSequenceSummary,
45
+ TFTokenClassificationLoss,
46
+ get_initializer,
47
+ keras,
48
+ keras_serializable,
49
+ unpack_inputs,
50
+ )
51
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
52
+ from ...utils import (
53
+ ModelOutput,
54
+ add_code_sample_docstrings,
55
+ add_start_docstrings,
56
+ add_start_docstrings_to_model_forward,
57
+ logging,
58
+ replace_return_docstrings,
59
+ )
60
+ from .configuration_electra import ElectraConfig
61
+
62
+
63
+ logger = logging.get_logger(__name__)
64
+
65
+ _CHECKPOINT_FOR_DOC = "google/electra-small-discriminator"
66
+ _CONFIG_FOR_DOC = "ElectraConfig"
67
+
68
+
69
+ from ..deprecated._archive_maps import TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
70
+
71
+
72
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->Electra
73
+ class TFElectraSelfAttention(keras.layers.Layer):
74
+ def __init__(self, config: ElectraConfig, **kwargs):
75
+ super().__init__(**kwargs)
76
+
77
+ if config.hidden_size % config.num_attention_heads != 0:
78
+ raise ValueError(
79
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number "
80
+ f"of attention heads ({config.num_attention_heads})"
81
+ )
82
+
83
+ self.num_attention_heads = config.num_attention_heads
84
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
85
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
86
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
87
+
88
+ self.query = keras.layers.Dense(
89
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
90
+ )
91
+ self.key = keras.layers.Dense(
92
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
93
+ )
94
+ self.value = keras.layers.Dense(
95
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
96
+ )
97
+ self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
98
+
99
+ self.is_decoder = config.is_decoder
100
+ self.config = config
101
+
102
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
103
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
104
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
105
+
106
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
107
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
108
+
109
+ def call(
110
+ self,
111
+ hidden_states: tf.Tensor,
112
+ attention_mask: tf.Tensor,
113
+ head_mask: tf.Tensor,
114
+ encoder_hidden_states: tf.Tensor,
115
+ encoder_attention_mask: tf.Tensor,
116
+ past_key_value: Tuple[tf.Tensor],
117
+ output_attentions: bool,
118
+ training: bool = False,
119
+ ) -> Tuple[tf.Tensor]:
120
+ batch_size = shape_list(hidden_states)[0]
121
+ mixed_query_layer = self.query(inputs=hidden_states)
122
+
123
+ # If this is instantiated as a cross-attention module, the keys
124
+ # and values come from an encoder; the attention mask needs to be
125
+ # such that the encoder's padding tokens are not attended to.
126
+ is_cross_attention = encoder_hidden_states is not None
127
+
128
+ if is_cross_attention and past_key_value is not None:
129
+ # reuse k,v, cross_attentions
130
+ key_layer = past_key_value[0]
131
+ value_layer = past_key_value[1]
132
+ attention_mask = encoder_attention_mask
133
+ elif is_cross_attention:
134
+ key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size)
135
+ value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size)
136
+ attention_mask = encoder_attention_mask
137
+ elif past_key_value is not None:
138
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
139
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
140
+ key_layer = tf.concat([past_key_value[0], key_layer], axis=2)
141
+ value_layer = tf.concat([past_key_value[1], value_layer], axis=2)
142
+ else:
143
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
144
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
145
+
146
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
147
+
148
+ if self.is_decoder:
149
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
150
+ # Further calls to cross_attention layer can then reuse all cross-attention
151
+ # key/value_states (first "if" case)
152
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
153
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
154
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
155
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
156
+ past_key_value = (key_layer, value_layer)
157
+
158
+ # Take the dot product between "query" and "key" to get the raw attention scores.
159
+ # (batch size, num_heads, seq_len_q, seq_len_k)
160
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
161
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
162
+ attention_scores = tf.divide(attention_scores, dk)
163
+
164
+ if attention_mask is not None:
165
+ # Apply the attention mask (precomputed for all layers in the TFElectraModel call() function)
166
+ attention_scores = tf.add(attention_scores, attention_mask)
167
+
168
+ # Normalize the attention scores to probabilities.
169
+ attention_probs = stable_softmax(logits=attention_scores, axis=-1)
170
+
171
+ # This is actually dropping out entire tokens to attend to, which might
172
+ # seem a bit unusual, but is taken from the original Transformer paper.
173
+ attention_probs = self.dropout(inputs=attention_probs, training=training)
174
+
175
+ # Mask heads if we want to
176
+ if head_mask is not None:
177
+ attention_probs = tf.multiply(attention_probs, head_mask)
178
+
179
+ attention_output = tf.matmul(attention_probs, value_layer)
180
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
181
+
182
+ # (batch_size, seq_len_q, all_head_size)
183
+ attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
184
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
185
+
186
+ if self.is_decoder:
187
+ outputs = outputs + (past_key_value,)
188
+ return outputs
189
+
190
+ def build(self, input_shape=None):
191
+ if self.built:
192
+ return
193
+ self.built = True
194
+ if getattr(self, "query", None) is not None:
195
+ with tf.name_scope(self.query.name):
196
+ self.query.build([None, None, self.config.hidden_size])
197
+ if getattr(self, "key", None) is not None:
198
+ with tf.name_scope(self.key.name):
199
+ self.key.build([None, None, self.config.hidden_size])
200
+ if getattr(self, "value", None) is not None:
201
+ with tf.name_scope(self.value.name):
202
+ self.value.build([None, None, self.config.hidden_size])
203
+
204
+
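A minimal sketch (editor's illustration, not part of the file above, with hypothetical shapes) of what `transpose_for_scores` does: the fused projection of width `all_head_size` is split into `num_attention_heads` slices and the head axis is moved forward so attention can be computed per head.

import tensorflow as tf

batch_size, seq_len, num_heads, head_size = 2, 4, 8, 32  # hypothetical toy shapes
fused = tf.random.normal((batch_size, seq_len, num_heads * head_size))
# [batch, seq, all_head_size] -> [batch, seq, heads, head_size]
per_head = tf.reshape(fused, (batch_size, -1, num_heads, head_size))
# -> [batch, heads, seq, head_size], the layout used for the Q.K^T matmul above
per_head = tf.transpose(per_head, perm=[0, 2, 1, 3])
print(per_head.shape)  # (2, 8, 4, 32)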
205
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->Electra
206
+ class TFElectraSelfOutput(keras.layers.Layer):
207
+ def __init__(self, config: ElectraConfig, **kwargs):
208
+ super().__init__(**kwargs)
209
+
210
+ self.dense = keras.layers.Dense(
211
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
212
+ )
213
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
214
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
215
+ self.config = config
216
+
217
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
218
+ hidden_states = self.dense(inputs=hidden_states)
219
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
220
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
221
+
222
+ return hidden_states
223
+
224
+ def build(self, input_shape=None):
225
+ if self.built:
226
+ return
227
+ self.built = True
228
+ if getattr(self, "dense", None) is not None:
229
+ with tf.name_scope(self.dense.name):
230
+ self.dense.build([None, None, self.config.hidden_size])
231
+ if getattr(self, "LayerNorm", None) is not None:
232
+ with tf.name_scope(self.LayerNorm.name):
233
+ self.LayerNorm.build([None, None, self.config.hidden_size])
234
+
235
+
236
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->Electra
237
+ class TFElectraAttention(keras.layers.Layer):
238
+ def __init__(self, config: ElectraConfig, **kwargs):
239
+ super().__init__(**kwargs)
240
+
241
+ self.self_attention = TFElectraSelfAttention(config, name="self")
242
+ self.dense_output = TFElectraSelfOutput(config, name="output")
243
+
244
+ def prune_heads(self, heads):
245
+ raise NotImplementedError
246
+
247
+ def call(
248
+ self,
249
+ input_tensor: tf.Tensor,
250
+ attention_mask: tf.Tensor,
251
+ head_mask: tf.Tensor,
252
+ encoder_hidden_states: tf.Tensor,
253
+ encoder_attention_mask: tf.Tensor,
254
+ past_key_value: Tuple[tf.Tensor],
255
+ output_attentions: bool,
256
+ training: bool = False,
257
+ ) -> Tuple[tf.Tensor]:
258
+ self_outputs = self.self_attention(
259
+ hidden_states=input_tensor,
260
+ attention_mask=attention_mask,
261
+ head_mask=head_mask,
262
+ encoder_hidden_states=encoder_hidden_states,
263
+ encoder_attention_mask=encoder_attention_mask,
264
+ past_key_value=past_key_value,
265
+ output_attentions=output_attentions,
266
+ training=training,
267
+ )
268
+ attention_output = self.dense_output(
269
+ hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
270
+ )
271
+ # add attentions (possibly with past_key_value) if we output them
272
+ outputs = (attention_output,) + self_outputs[1:]
273
+
274
+ return outputs
275
+
276
+ def build(self, input_shape=None):
277
+ if self.built:
278
+ return
279
+ self.built = True
280
+ if getattr(self, "self_attention", None) is not None:
281
+ with tf.name_scope(self.self_attention.name):
282
+ self.self_attention.build(None)
283
+ if getattr(self, "dense_output", None) is not None:
284
+ with tf.name_scope(self.dense_output.name):
285
+ self.dense_output.build(None)
286
+
287
+
288
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->Electra
289
+ class TFElectraIntermediate(keras.layers.Layer):
290
+ def __init__(self, config: ElectraConfig, **kwargs):
291
+ super().__init__(**kwargs)
292
+
293
+ self.dense = keras.layers.Dense(
294
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
295
+ )
296
+
297
+ if isinstance(config.hidden_act, str):
298
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
299
+ else:
300
+ self.intermediate_act_fn = config.hidden_act
301
+ self.config = config
302
+
303
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
304
+ hidden_states = self.dense(inputs=hidden_states)
305
+ hidden_states = self.intermediate_act_fn(hidden_states)
306
+
307
+ return hidden_states
308
+
309
+ def build(self, input_shape=None):
310
+ if self.built:
311
+ return
312
+ self.built = True
313
+ if getattr(self, "dense", None) is not None:
314
+ with tf.name_scope(self.dense.name):
315
+ self.dense.build([None, None, self.config.hidden_size])
316
+
317
+
318
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->Electra
319
+ class TFElectraOutput(keras.layers.Layer):
320
+ def __init__(self, config: ElectraConfig, **kwargs):
321
+ super().__init__(**kwargs)
322
+
323
+ self.dense = keras.layers.Dense(
324
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
325
+ )
326
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
327
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
328
+ self.config = config
329
+
330
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
331
+ hidden_states = self.dense(inputs=hidden_states)
332
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
333
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
334
+
335
+ return hidden_states
336
+
337
+ def build(self, input_shape=None):
338
+ if self.built:
339
+ return
340
+ self.built = True
341
+ if getattr(self, "dense", None) is not None:
342
+ with tf.name_scope(self.dense.name):
343
+ self.dense.build([None, None, self.config.intermediate_size])
344
+ if getattr(self, "LayerNorm", None) is not None:
345
+ with tf.name_scope(self.LayerNorm.name):
346
+ self.LayerNorm.build([None, None, self.config.hidden_size])
347
+
348
+
349
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->Electra
350
+ class TFElectraLayer(keras.layers.Layer):
351
+ def __init__(self, config: ElectraConfig, **kwargs):
352
+ super().__init__(**kwargs)
353
+
354
+ self.attention = TFElectraAttention(config, name="attention")
355
+ self.is_decoder = config.is_decoder
356
+ self.add_cross_attention = config.add_cross_attention
357
+ if self.add_cross_attention:
358
+ if not self.is_decoder:
359
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
360
+ self.crossattention = TFElectraAttention(config, name="crossattention")
361
+ self.intermediate = TFElectraIntermediate(config, name="intermediate")
362
+ self.bert_output = TFElectraOutput(config, name="output")
363
+
364
+ def call(
365
+ self,
366
+ hidden_states: tf.Tensor,
367
+ attention_mask: tf.Tensor,
368
+ head_mask: tf.Tensor,
369
+ encoder_hidden_states: tf.Tensor | None,
370
+ encoder_attention_mask: tf.Tensor | None,
371
+ past_key_value: Tuple[tf.Tensor] | None,
372
+ output_attentions: bool,
373
+ training: bool = False,
374
+ ) -> Tuple[tf.Tensor]:
375
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
376
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
377
+ self_attention_outputs = self.attention(
378
+ input_tensor=hidden_states,
379
+ attention_mask=attention_mask,
380
+ head_mask=head_mask,
381
+ encoder_hidden_states=None,
382
+ encoder_attention_mask=None,
383
+ past_key_value=self_attn_past_key_value,
384
+ output_attentions=output_attentions,
385
+ training=training,
386
+ )
387
+ attention_output = self_attention_outputs[0]
388
+
389
+ # if decoder, the last output is tuple of self-attn cache
390
+ if self.is_decoder:
391
+ outputs = self_attention_outputs[1:-1]
392
+ present_key_value = self_attention_outputs[-1]
393
+ else:
394
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
395
+
396
+ cross_attn_present_key_value = None
397
+ if self.is_decoder and encoder_hidden_states is not None:
398
+ if not hasattr(self, "crossattention"):
399
+ raise ValueError(
400
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
401
+ " by setting `config.add_cross_attention=True`"
402
+ )
403
+
404
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
405
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
406
+ cross_attention_outputs = self.crossattention(
407
+ input_tensor=attention_output,
408
+ attention_mask=attention_mask,
409
+ head_mask=head_mask,
410
+ encoder_hidden_states=encoder_hidden_states,
411
+ encoder_attention_mask=encoder_attention_mask,
412
+ past_key_value=cross_attn_past_key_value,
413
+ output_attentions=output_attentions,
414
+ training=training,
415
+ )
416
+ attention_output = cross_attention_outputs[0]
417
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
418
+
419
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
420
+ cross_attn_present_key_value = cross_attention_outputs[-1]
421
+ present_key_value = present_key_value + cross_attn_present_key_value
422
+
423
+ intermediate_output = self.intermediate(hidden_states=attention_output)
424
+ layer_output = self.bert_output(
425
+ hidden_states=intermediate_output, input_tensor=attention_output, training=training
426
+ )
427
+ outputs = (layer_output,) + outputs # add attentions if we output them
428
+
429
+ # if decoder, return the attn key/values as the last output
430
+ if self.is_decoder:
431
+ outputs = outputs + (present_key_value,)
432
+
433
+ return outputs
434
+
435
+ def build(self, input_shape=None):
436
+ if self.built:
437
+ return
438
+ self.built = True
439
+ if getattr(self, "attention", None) is not None:
440
+ with tf.name_scope(self.attention.name):
441
+ self.attention.build(None)
442
+ if getattr(self, "intermediate", None) is not None:
443
+ with tf.name_scope(self.intermediate.name):
444
+ self.intermediate.build(None)
445
+ if getattr(self, "bert_output", None) is not None:
446
+ with tf.name_scope(self.bert_output.name):
447
+ self.bert_output.build(None)
448
+ if getattr(self, "crossattention", None) is not None:
449
+ with tf.name_scope(self.crossattention.name):
450
+ self.crossattention.build(None)
451
+
452
+
453
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->Electra
454
+ class TFElectraEncoder(keras.layers.Layer):
455
+ def __init__(self, config: ElectraConfig, **kwargs):
456
+ super().__init__(**kwargs)
457
+ self.config = config
458
+ self.layer = [TFElectraLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
459
+
460
+ def call(
461
+ self,
462
+ hidden_states: tf.Tensor,
463
+ attention_mask: tf.Tensor,
464
+ head_mask: tf.Tensor,
465
+ encoder_hidden_states: tf.Tensor | None,
466
+ encoder_attention_mask: tf.Tensor | None,
467
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None,
468
+ use_cache: Optional[bool],
469
+ output_attentions: bool,
470
+ output_hidden_states: bool,
471
+ return_dict: bool,
472
+ training: bool = False,
473
+ ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
474
+ all_hidden_states = () if output_hidden_states else None
475
+ all_attentions = () if output_attentions else None
476
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
477
+
478
+ next_decoder_cache = () if use_cache else None
479
+ for i, layer_module in enumerate(self.layer):
480
+ if output_hidden_states:
481
+ all_hidden_states = all_hidden_states + (hidden_states,)
482
+
483
+ past_key_value = past_key_values[i] if past_key_values is not None else None
484
+
485
+ layer_outputs = layer_module(
486
+ hidden_states=hidden_states,
487
+ attention_mask=attention_mask,
488
+ head_mask=head_mask[i],
489
+ encoder_hidden_states=encoder_hidden_states,
490
+ encoder_attention_mask=encoder_attention_mask,
491
+ past_key_value=past_key_value,
492
+ output_attentions=output_attentions,
493
+ training=training,
494
+ )
495
+ hidden_states = layer_outputs[0]
496
+
497
+ if use_cache:
498
+ next_decoder_cache += (layer_outputs[-1],)
499
+
500
+ if output_attentions:
501
+ all_attentions = all_attentions + (layer_outputs[1],)
502
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
503
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
504
+
505
+ # Add last layer
506
+ if output_hidden_states:
507
+ all_hidden_states = all_hidden_states + (hidden_states,)
508
+
509
+ if not return_dict:
510
+ return tuple(
511
+ v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None
512
+ )
513
+
514
+ return TFBaseModelOutputWithPastAndCrossAttentions(
515
+ last_hidden_state=hidden_states,
516
+ past_key_values=next_decoder_cache,
517
+ hidden_states=all_hidden_states,
518
+ attentions=all_attentions,
519
+ cross_attentions=all_cross_attentions,
520
+ )
521
+
522
+ def build(self, input_shape=None):
523
+ if self.built:
524
+ return
525
+ self.built = True
526
+ if getattr(self, "layer", None) is not None:
527
+ for layer in self.layer:
528
+ with tf.name_scope(layer.name):
529
+ layer.build(None)
530
+
531
+
532
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Electra
533
+ class TFElectraPooler(keras.layers.Layer):
534
+ def __init__(self, config: ElectraConfig, **kwargs):
535
+ super().__init__(**kwargs)
536
+
537
+ self.dense = keras.layers.Dense(
538
+ units=config.hidden_size,
539
+ kernel_initializer=get_initializer(config.initializer_range),
540
+ activation="tanh",
541
+ name="dense",
542
+ )
543
+ self.config = config
544
+
545
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
546
+ # We "pool" the model by simply taking the hidden state corresponding
547
+ # to the first token.
548
+ first_token_tensor = hidden_states[:, 0]
549
+ pooled_output = self.dense(inputs=first_token_tensor)
550
+
551
+ return pooled_output
552
+
553
+ def build(self, input_shape=None):
554
+ if self.built:
555
+ return
556
+ self.built = True
557
+ if getattr(self, "dense", None) is not None:
558
+ with tf.name_scope(self.dense.name):
559
+ self.dense.build([None, None, self.config.hidden_size])
560
+
561
+
562
+ # Copied from transformers.models.albert.modeling_tf_albert.TFAlbertEmbeddings with Albert->Electra
563
+ class TFElectraEmbeddings(keras.layers.Layer):
564
+ """Construct the embeddings from word, position and token_type embeddings."""
565
+
566
+ def __init__(self, config: ElectraConfig, **kwargs):
567
+ super().__init__(**kwargs)
568
+
569
+ self.config = config
570
+ self.embedding_size = config.embedding_size
571
+ self.max_position_embeddings = config.max_position_embeddings
572
+ self.initializer_range = config.initializer_range
573
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
574
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
575
+
576
+ def build(self, input_shape=None):
577
+ with tf.name_scope("word_embeddings"):
578
+ self.weight = self.add_weight(
579
+ name="weight",
580
+ shape=[self.config.vocab_size, self.embedding_size],
581
+ initializer=get_initializer(self.initializer_range),
582
+ )
583
+
584
+ with tf.name_scope("token_type_embeddings"):
585
+ self.token_type_embeddings = self.add_weight(
586
+ name="embeddings",
587
+ shape=[self.config.type_vocab_size, self.embedding_size],
588
+ initializer=get_initializer(self.initializer_range),
589
+ )
590
+
591
+ with tf.name_scope("position_embeddings"):
592
+ self.position_embeddings = self.add_weight(
593
+ name="embeddings",
594
+ shape=[self.max_position_embeddings, self.embedding_size],
595
+ initializer=get_initializer(self.initializer_range),
596
+ )
597
+
598
+ if self.built:
599
+ return
600
+ self.built = True
601
+ if getattr(self, "LayerNorm", None) is not None:
602
+ with tf.name_scope(self.LayerNorm.name):
603
+ self.LayerNorm.build([None, None, self.config.embedding_size])
604
+
605
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call
606
+ def call(
607
+ self,
608
+ input_ids: tf.Tensor = None,
609
+ position_ids: tf.Tensor = None,
610
+ token_type_ids: tf.Tensor = None,
611
+ inputs_embeds: tf.Tensor = None,
612
+ past_key_values_length=0,
613
+ training: bool = False,
614
+ ) -> tf.Tensor:
615
+ """
616
+ Applies embedding based on inputs tensor.
617
+
618
+ Returns:
619
+ final_embeddings (`tf.Tensor`): output embedding tensor.
620
+ """
621
+ if input_ids is None and inputs_embeds is None:
622
+ raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
623
+
624
+ if input_ids is not None:
625
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
626
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
627
+
628
+ input_shape = shape_list(inputs_embeds)[:-1]
629
+
630
+ if token_type_ids is None:
631
+ token_type_ids = tf.fill(dims=input_shape, value=0)
632
+
633
+ if position_ids is None:
634
+ position_ids = tf.expand_dims(
635
+ tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
636
+ )
637
+
638
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
639
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
640
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
641
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
642
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
643
+
644
+ return final_embeddings
645
+
646
+
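A toy sketch (assumed dimensions, not part of the file) of how the embedding layer combines its three lookups: word, position and token-type embeddings are gathered independently and summed before LayerNorm and dropout.

import tensorflow as tf

vocab_size, type_vocab, max_pos, emb_dim = 10, 2, 8, 4  # toy dimensions
word_emb = tf.random.normal((vocab_size, emb_dim))
type_emb = tf.random.normal((type_vocab, emb_dim))
pos_emb = tf.random.normal((max_pos, emb_dim))

input_ids = tf.constant([[3, 1, 7]])                 # [batch=1, seq=3]
token_type_ids = tf.zeros_like(input_ids)
position_ids = tf.expand_dims(tf.range(0, 3), axis=0)

summed = (
    tf.gather(word_emb, input_ids)
    + tf.gather(pos_emb, position_ids)
    + tf.gather(type_emb, token_type_ids)
)
print(summed.shape)  # (1, 3, 4), then LayerNorm and dropout are applied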
647
+ class TFElectraDiscriminatorPredictions(keras.layers.Layer):
648
+ def __init__(self, config, **kwargs):
649
+ super().__init__(**kwargs)
650
+
651
+ self.dense = keras.layers.Dense(config.hidden_size, name="dense")
652
+ self.dense_prediction = keras.layers.Dense(1, name="dense_prediction")
653
+ self.config = config
654
+
655
+ def call(self, discriminator_hidden_states, training=False):
656
+ hidden_states = self.dense(discriminator_hidden_states)
657
+ hidden_states = get_tf_activation(self.config.hidden_act)(hidden_states)
658
+ logits = tf.squeeze(self.dense_prediction(hidden_states), -1)
659
+
660
+ return logits
661
+
662
+ def build(self, input_shape=None):
663
+ if self.built:
664
+ return
665
+ self.built = True
666
+ if getattr(self, "dense", None) is not None:
667
+ with tf.name_scope(self.dense.name):
668
+ self.dense.build([None, None, self.config.hidden_size])
669
+ if getattr(self, "dense_prediction", None) is not None:
670
+ with tf.name_scope(self.dense_prediction.name):
671
+ self.dense_prediction.build([None, None, self.config.hidden_size])
672
+
673
+
674
+ class TFElectraGeneratorPredictions(keras.layers.Layer):
675
+ def __init__(self, config, **kwargs):
676
+ super().__init__(**kwargs)
677
+
678
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
679
+ self.dense = keras.layers.Dense(config.embedding_size, name="dense")
680
+ self.config = config
681
+
682
+ def call(self, generator_hidden_states, training=False):
683
+ hidden_states = self.dense(generator_hidden_states)
684
+ hidden_states = get_tf_activation("gelu")(hidden_states)
685
+ hidden_states = self.LayerNorm(hidden_states)
686
+
687
+ return hidden_states
688
+
689
+ def build(self, input_shape=None):
690
+ if self.built:
691
+ return
692
+ self.built = True
693
+ if getattr(self, "LayerNorm", None) is not None:
694
+ with tf.name_scope(self.LayerNorm.name):
695
+ self.LayerNorm.build([None, None, self.config.embedding_size])
696
+ if getattr(self, "dense", None) is not None:
697
+ with tf.name_scope(self.dense.name):
698
+ self.dense.build([None, None, self.config.hidden_size])
699
+
700
+
701
+ class TFElectraPreTrainedModel(TFPreTrainedModel):
702
+ """
703
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
704
+ models.
705
+ """
706
+
707
+ config_class = ElectraConfig
708
+ base_model_prefix = "electra"
709
+ # When the model is loaded from a PT model
710
+ _keys_to_ignore_on_load_unexpected = [r"generator_lm_head.weight"]
711
+ _keys_to_ignore_on_load_missing = [r"dropout"]
712
+
713
+
714
+ @keras_serializable
715
+ class TFElectraMainLayer(keras.layers.Layer):
716
+ config_class = ElectraConfig
717
+
718
+ def __init__(self, config, **kwargs):
719
+ super().__init__(**kwargs)
720
+
721
+ self.config = config
722
+ self.is_decoder = config.is_decoder
723
+
724
+ self.embeddings = TFElectraEmbeddings(config, name="embeddings")
725
+
726
+ if config.embedding_size != config.hidden_size:
727
+ self.embeddings_project = keras.layers.Dense(config.hidden_size, name="embeddings_project")
728
+
729
+ self.encoder = TFElectraEncoder(config, name="encoder")
730
+
731
+ def get_input_embeddings(self):
732
+ return self.embeddings
733
+
734
+ def set_input_embeddings(self, value):
735
+ self.embeddings.weight = value
736
+ self.embeddings.vocab_size = shape_list(value)[0]
737
+
738
+ def _prune_heads(self, heads_to_prune):
739
+ """
740
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
741
+ class PreTrainedModel
742
+ """
743
+ raise NotImplementedError
744
+
745
+ def get_extended_attention_mask(self, attention_mask, input_shape, dtype, past_key_values_length=0):
746
+ batch_size, seq_length = input_shape
747
+
748
+ if attention_mask is None:
749
+ attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1)
750
+
751
+ # We create a 3D attention mask from a 2D tensor mask.
752
+ # Sizes are [batch_size, 1, 1, to_seq_length]
753
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
754
+ # this attention mask is more simple than the triangular masking of causal attention
755
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
756
+ attention_mask_shape = shape_list(attention_mask)
757
+
758
+ mask_seq_length = seq_length + past_key_values_length
759
+ # Copied from `modeling_tf_t5.py`
760
+ # Provided a padding mask of dimensions [batch_size, mask_seq_length]
761
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
762
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
763
+ if self.is_decoder:
764
+ seq_ids = tf.range(mask_seq_length)
765
+ causal_mask = tf.less_equal(
766
+ tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)),
767
+ seq_ids[None, :, None],
768
+ )
769
+ causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype)
770
+ extended_attention_mask = causal_mask * attention_mask[:, None, :]
771
+ attention_mask_shape = shape_list(extended_attention_mask)
772
+ extended_attention_mask = tf.reshape(
773
+ extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])
774
+ )
775
+ if past_key_values_length > 0:
776
+ extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :]
777
+ else:
778
+ extended_attention_mask = tf.reshape(
779
+ attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])
780
+ )
781
+
782
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
783
+ # masked positions, this operation will create a tensor which is 0.0 for
784
+ # positions we want to attend and -10000.0 for masked positions.
785
+ # Since we are adding it to the raw scores before the softmax, this is
786
+ # effectively the same as removing these entirely.
787
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=dtype)
788
+ one_cst = tf.constant(1.0, dtype=dtype)
789
+ ten_thousand_cst = tf.constant(-10000.0, dtype=dtype)
790
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
791
+
792
+ return extended_attention_mask
793
+
794
+ def get_head_mask(self, head_mask):
795
+ if head_mask is not None:
796
+ raise NotImplementedError
797
+ else:
798
+ head_mask = [None] * self.config.num_hidden_layers
799
+
800
+ return head_mask
801
+
802
+ @unpack_inputs
803
+ def call(
804
+ self,
805
+ input_ids: TFModelInputType | None = None,
806
+ attention_mask: np.ndarray | tf.Tensor | None = None,
807
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
808
+ position_ids: np.ndarray | tf.Tensor | None = None,
809
+ head_mask: np.ndarray | tf.Tensor | None = None,
810
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
811
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
812
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
813
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
814
+ use_cache: Optional[bool] = None,
815
+ output_attentions: Optional[bool] = None,
816
+ output_hidden_states: Optional[bool] = None,
817
+ return_dict: Optional[bool] = None,
818
+ training: Optional[bool] = False,
819
+ ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
820
+ if not self.config.is_decoder:
821
+ use_cache = False
822
+
823
+ if input_ids is not None and inputs_embeds is not None:
824
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
825
+ elif input_ids is not None:
826
+ input_shape = shape_list(input_ids)
827
+ elif inputs_embeds is not None:
828
+ input_shape = shape_list(inputs_embeds)[:-1]
829
+ else:
830
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
831
+
832
+ batch_size, seq_length = input_shape
833
+
834
+ if past_key_values is None:
835
+ past_key_values_length = 0
836
+ past_key_values = [None] * len(self.encoder.layer)
837
+ else:
838
+ past_key_values_length = shape_list(past_key_values[0][0])[-2]
839
+
840
+ if attention_mask is None:
841
+ attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1)
842
+
843
+ if token_type_ids is None:
844
+ token_type_ids = tf.fill(dims=input_shape, value=0)
845
+
846
+ hidden_states = self.embeddings(
847
+ input_ids=input_ids,
848
+ position_ids=position_ids,
849
+ token_type_ids=token_type_ids,
850
+ inputs_embeds=inputs_embeds,
851
+ past_key_values_length=past_key_values_length,
852
+ training=training,
853
+ )
854
+ extended_attention_mask = self.get_extended_attention_mask(
855
+ attention_mask, input_shape, hidden_states.dtype, past_key_values_length
856
+ )
857
+
858
+ # Copied from `modeling_tf_t5.py` with -1e9 -> -10000
859
+ if self.is_decoder and encoder_attention_mask is not None:
860
+ # If a 2D or 3D attention mask is provided for the cross-attention
861
+ # we need to make broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
862
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
863
+ encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype)
864
+ num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
865
+ if num_dims_encoder_attention_mask == 3:
866
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
867
+ if num_dims_encoder_attention_mask == 2:
868
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
869
+
870
+ # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
871
+ # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
872
+ # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask,
873
+ # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2)))
874
+
875
+ encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
876
+ else:
877
+ encoder_extended_attention_mask = None
878
+
879
+ head_mask = self.get_head_mask(head_mask)
880
+
881
+ if hasattr(self, "embeddings_project"):
882
+ hidden_states = self.embeddings_project(hidden_states, training=training)
883
+
884
+ hidden_states = self.encoder(
885
+ hidden_states=hidden_states,
886
+ attention_mask=extended_attention_mask,
887
+ head_mask=head_mask,
888
+ encoder_hidden_states=encoder_hidden_states,
889
+ encoder_attention_mask=encoder_extended_attention_mask,
890
+ past_key_values=past_key_values,
891
+ use_cache=use_cache,
892
+ output_attentions=output_attentions,
893
+ output_hidden_states=output_hidden_states,
894
+ return_dict=return_dict,
895
+ training=training,
896
+ )
897
+
898
+ return hidden_states
899
+
900
+ def build(self, input_shape=None):
901
+ if self.built:
902
+ return
903
+ self.built = True
904
+ if getattr(self, "embeddings", None) is not None:
905
+ with tf.name_scope(self.embeddings.name):
906
+ self.embeddings.build(None)
907
+ if getattr(self, "encoder", None) is not None:
908
+ with tf.name_scope(self.encoder.name):
909
+ self.encoder.build(None)
910
+ if getattr(self, "embeddings_project", None) is not None:
911
+ with tf.name_scope(self.embeddings_project.name):
912
+ self.embeddings_project.build([None, None, self.config.embedding_size])
913
+
914
+
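A small sketch (editor's illustration with hypothetical values) of the additive mask that `get_extended_attention_mask` produces in the encoder case: padding positions become -10000.0, which effectively removes them after the softmax.

import tensorflow as tf

attention_mask = tf.constant([[1, 1, 1, 0]], dtype=tf.float32)  # last position is padding
extended = attention_mask[:, None, None, :]   # broadcastable to [batch, heads, from_seq, to_seq]
additive = (1.0 - extended) * -10000.0
print(additive.numpy())                       # 0.0 for real tokens, -10000.0 for padding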
915
+ @dataclass
916
+ class TFElectraForPreTrainingOutput(ModelOutput):
917
+ """
918
+ Output type of [`TFElectraForPreTraining`].
919
+
920
+ Args:
921
+ loss (*optional*, returned when `labels` is provided, `tf.Tensor` of shape `(1,)`):
922
+ Total loss of the ELECTRA objective.
923
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):
924
+ Prediction scores of the head (scores for each token before SoftMax).
925
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
926
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
927
+ `(batch_size, sequence_length, hidden_size)`.
928
+
929
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
930
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
931
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
932
+ sequence_length)`.
933
+
934
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
935
+ heads.
936
+ """
937
+
938
+ logits: tf.Tensor = None
939
+ hidden_states: Tuple[tf.Tensor] | None = None
940
+ attentions: Tuple[tf.Tensor] | None = None
941
+
942
+
943
+ ELECTRA_START_DOCSTRING = r"""
944
+
945
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
946
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
947
+ etc.)
948
+
949
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
950
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
951
+ behavior.
952
+
953
+ <Tip>
954
+
955
+ TensorFlow models and layers in `transformers` accept two formats as input:
956
+
957
+ - having all inputs as keyword arguments (like PyTorch models), or
958
+ - having all inputs as a list, tuple or dict in the first positional argument.
959
+
960
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
961
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
962
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
963
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
964
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
965
+ positional argument:
966
+
967
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
968
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
969
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
970
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
971
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
972
+
973
+ Note that when creating models and layers with
974
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
975
+ about any of this, as you can just pass inputs like you would to any other Python function!
976
+
977
+ </Tip>
978
+
979
+ Parameters:
980
+ config ([`ElectraConfig`]): Model configuration class with all the parameters of the model.
981
+ Initializing with a config file does not load the weights associated with the model, only the
982
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
983
+ """
984
+
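The docstring above describes three equivalent ways of passing inputs; a hedged sketch of what they look like with a tiny, randomly initialised configuration (all sizes below are made up for illustration):

import tensorflow as tf
from transformers import ElectraConfig, TFElectraModel

config = ElectraConfig(
    vocab_size=100, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=2, intermediate_size=64,
)  # tiny config, random weights
model = TFElectraModel(config)

input_ids = tf.constant([[5, 6, 7, 8]])
attention_mask = tf.ones_like(input_ids)

out_kwargs = model(input_ids=input_ids, attention_mask=attention_mask)
out_list = model([input_ids, attention_mask])
out_dict = model({"input_ids": input_ids, "attention_mask": attention_mask})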
985
+ ELECTRA_INPUTS_DOCSTRING = r"""
986
+ Args:
987
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
988
+ Indices of input sequence tokens in the vocabulary.
989
+
990
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
991
+ [`PreTrainedTokenizer.encode`] for details.
992
+
993
+ [What are input IDs?](../glossary#input-ids)
994
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
995
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
996
+
997
+ - 1 for tokens that are **not masked**,
998
+ - 0 for tokens that are **masked**.
999
+
1000
+ [What are attention masks?](../glossary#attention-mask)
1001
+ position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
1002
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1003
+ config.max_position_embeddings - 1]`.
1004
+
1005
+ [What are position IDs?](../glossary#position-ids)
1006
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1007
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
1008
+
1009
+ - 1 indicates the head is **not masked**,
1010
+ - 0 indicates the head is **masked**.
1011
+
1012
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
1013
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1014
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1015
+ model's internal embedding lookup matrix.
1016
+ output_attentions (`bool`, *optional*):
1017
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1018
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1019
+ config will be used instead.
1020
+ output_hidden_states (`bool`, *optional*):
1021
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1022
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1023
+ used instead.
1024
+ return_dict (`bool`, *optional*):
1025
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1026
+ eager mode, in graph mode the value will always be set to True.
1027
+ training (`bool`, *optional*, defaults to `False`):
1028
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1029
+ behaviors between training and evaluation).
1030
+ """
1031
+
1032
+
1033
+ @add_start_docstrings(
1034
+ "The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to "
1035
+ "the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the "
1036
+ "hidden size and embedding size are different. "
1037
+ ""
1038
+ "Both the generator and discriminator checkpoints may be loaded into this model.",
1039
+ ELECTRA_START_DOCSTRING,
1040
+ )
1041
+ class TFElectraModel(TFElectraPreTrainedModel):
1042
+ def __init__(self, config, *inputs, **kwargs):
1043
+ super().__init__(config, *inputs, **kwargs)
1044
+
1045
+ self.electra = TFElectraMainLayer(config, name="electra")
1046
+
1047
+ @unpack_inputs
1048
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1049
+ @add_code_sample_docstrings(
1050
+ checkpoint=_CHECKPOINT_FOR_DOC,
1051
+ output_type=TFBaseModelOutputWithPastAndCrossAttentions,
1052
+ config_class=_CONFIG_FOR_DOC,
1053
+ )
1054
+ def call(
1055
+ self,
1056
+ input_ids: TFModelInputType | None = None,
1057
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1058
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1059
+ position_ids: np.ndarray | tf.Tensor | None = None,
1060
+ head_mask: np.ndarray | tf.Tensor | None = None,
1061
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1062
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1063
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1064
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1065
+ use_cache: Optional[bool] = None,
1066
+ output_attentions: Optional[bool] = None,
1067
+ output_hidden_states: Optional[bool] = None,
1068
+ return_dict: Optional[bool] = None,
1069
+ training: Optional[bool] = False,
1070
+ ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
1071
+ r"""
1072
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1073
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1074
+ the model is configured as a decoder.
1075
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1076
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1077
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1078
+
1079
+ - 1 for tokens that are **not masked**,
1080
+ - 0 for tokens that are **masked**.
1081
+
1082
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`)
1083
+ contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1084
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1085
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1086
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1087
+ use_cache (`bool`, *optional*, defaults to `True`):
1088
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1089
+ `past_key_values`). Set to `False` during training, `True` during generation
1090
+ """
1091
+ outputs = self.electra(
1092
+ input_ids=input_ids,
1093
+ attention_mask=attention_mask,
1094
+ token_type_ids=token_type_ids,
1095
+ position_ids=position_ids,
1096
+ head_mask=head_mask,
1097
+ encoder_hidden_states=encoder_hidden_states,
1098
+ encoder_attention_mask=encoder_attention_mask,
1099
+ past_key_values=past_key_values,
1100
+ use_cache=use_cache,
1101
+ inputs_embeds=inputs_embeds,
1102
+ output_attentions=output_attentions,
1103
+ output_hidden_states=output_hidden_states,
1104
+ return_dict=return_dict,
1105
+ training=training,
1106
+ )
1107
+
1108
+ return outputs
1109
+
1110
+ def build(self, input_shape=None):
1111
+ if self.built:
1112
+ return
1113
+ self.built = True
1114
+ if getattr(self, "electra", None) is not None:
1115
+ with tf.name_scope(self.electra.name):
1116
+ self.electra.build(None)
1117
+
1118
+
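A short usage sketch for the bare model (assumes the `google/electra-small-discriminator` checkpoint referenced in the docstring decorator can be downloaded):

from transformers import AutoTokenizer, TFElectraModel

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
model = TFElectraModel.from_pretrained("google/electra-small-discriminator")

inputs = tokenizer("ELECTRA replaces masked language modeling with replaced-token detection.",
                   return_tensors="tf")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)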
1119
+ @add_start_docstrings(
1120
+ """
1121
+ Electra model with a binary classification head on top as used during pretraining for identifying generated tokens.
1122
+
1123
+ Even though both the discriminator and generator may be loaded into this model, the discriminator is the only model
1124
+ of the two to have the correct classification head to be used for this model.
1125
+ """,
1126
+ ELECTRA_START_DOCSTRING,
1127
+ )
1128
+ class TFElectraForPreTraining(TFElectraPreTrainedModel):
1129
+ def __init__(self, config, **kwargs):
1130
+ super().__init__(config, **kwargs)
1131
+
1132
+ self.electra = TFElectraMainLayer(config, name="electra")
1133
+ self.discriminator_predictions = TFElectraDiscriminatorPredictions(config, name="discriminator_predictions")
1134
+
1135
+ @unpack_inputs
1136
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1137
+ @replace_return_docstrings(output_type=TFElectraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1138
+ def call(
1139
+ self,
1140
+ input_ids: TFModelInputType | None = None,
1141
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1142
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1143
+ position_ids: np.ndarray | tf.Tensor | None = None,
1144
+ head_mask: np.ndarray | tf.Tensor | None = None,
1145
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1146
+ output_attentions: Optional[bool] = None,
1147
+ output_hidden_states: Optional[bool] = None,
1148
+ return_dict: Optional[bool] = None,
1149
+ training: Optional[bool] = False,
1150
+ ) -> Union[TFElectraForPreTrainingOutput, Tuple[tf.Tensor]]:
1151
+ r"""
1152
+ Returns:
1153
+
1154
+ Examples:
1155
+
1156
+ ```python
1157
+ >>> import tensorflow as tf
1158
+ >>> from transformers import AutoTokenizer, TFElectraForPreTraining
1159
+
1160
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
1161
+ >>> model = TFElectraForPreTraining.from_pretrained("google/electra-small-discriminator")
1162
+ >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
1163
+ >>> outputs = model(input_ids)
1164
+ >>> scores = outputs[0]
1165
+ ```"""
1166
+ discriminator_hidden_states = self.electra(
1167
+ input_ids=input_ids,
1168
+ attention_mask=attention_mask,
1169
+ token_type_ids=token_type_ids,
1170
+ position_ids=position_ids,
1171
+ head_mask=head_mask,
1172
+ inputs_embeds=inputs_embeds,
1173
+ output_attentions=output_attentions,
1174
+ output_hidden_states=output_hidden_states,
1175
+ return_dict=return_dict,
1176
+ training=training,
1177
+ )
1178
+ discriminator_sequence_output = discriminator_hidden_states[0]
1179
+ logits = self.discriminator_predictions(discriminator_sequence_output)
1180
+
1181
+ if not return_dict:
1182
+ return (logits,) + discriminator_hidden_states[1:]
1183
+
1184
+ return TFElectraForPreTrainingOutput(
1185
+ logits=logits,
1186
+ hidden_states=discriminator_hidden_states.hidden_states,
1187
+ attentions=discriminator_hidden_states.attentions,
1188
+ )
1189
+
1190
+ def build(self, input_shape=None):
1191
+ if self.built:
1192
+ return
1193
+ self.built = True
1194
+ if getattr(self, "electra", None) is not None:
1195
+ with tf.name_scope(self.electra.name):
1196
+ self.electra.build(None)
1197
+ if getattr(self, "discriminator_predictions", None) is not None:
1198
+ with tf.name_scope(self.discriminator_predictions.name):
1199
+ self.discriminator_predictions.build(None)
1200
+
1201
+
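Continuing the docstring example above, a hedged sketch of how the discriminator logits are commonly interpreted: a positive logit (sigmoid above 0.5) suggests the token was replaced by the generator. The input sentence is only an illustration.

import tensorflow as tf
from transformers import AutoTokenizer, TFElectraForPreTraining

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
model = TFElectraForPreTraining.from_pretrained("google/electra-small-discriminator")

inputs = tokenizer("The quick brown fox fake over the lazy dog", return_tensors="tf")
logits = model(**inputs).logits                      # shape (1, sequence_length)
replaced = tf.math.sigmoid(logits)[0] > 0.5          # True where a token looks replaced
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0].numpy().tolist())
print([tok for tok, flag in zip(tokens, replaced.numpy()) if flag])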
1202
+ class TFElectraMaskedLMHead(keras.layers.Layer):
1203
+ def __init__(self, config, input_embeddings, **kwargs):
1204
+ super().__init__(**kwargs)
1205
+
1206
+ self.config = config
1207
+ self.embedding_size = config.embedding_size
1208
+ self.input_embeddings = input_embeddings
1209
+
1210
+ def build(self, input_shape):
1211
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
1212
+
1213
+ super().build(input_shape)
1214
+
1215
+ def get_output_embeddings(self):
1216
+ return self.input_embeddings
1217
+
1218
+ def set_output_embeddings(self, value):
1219
+ self.input_embeddings.weight = value
1220
+ self.input_embeddings.vocab_size = shape_list(value)[0]
1221
+
1222
+ def get_bias(self):
1223
+ return {"bias": self.bias}
1224
+
1225
+ def set_bias(self, value):
1226
+ self.bias = value["bias"]
1227
+ self.config.vocab_size = shape_list(value["bias"])[0]
1228
+
1229
+ def call(self, hidden_states):
1230
+ seq_length = shape_list(tensor=hidden_states)[1]
1231
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
1232
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
1233
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
1234
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
1235
+
1236
+ return hidden_states
1237
+
1238
+
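A toy numerical sketch (hypothetical shapes, not part of the file) of the weight tying in `TFElectraMaskedLMHead`: scores come from multiplying hidden states against the transposed input-embedding matrix, plus a per-vocabulary bias.

import tensorflow as tf

vocab_size, emb_dim, batch, seq = 6, 4, 2, 3        # toy dimensions
embedding_matrix = tf.random.normal((vocab_size, emb_dim))
hidden = tf.random.normal((batch, seq, emb_dim))
bias = tf.zeros((vocab_size,))

flat = tf.reshape(hidden, (-1, emb_dim))
logits = tf.matmul(flat, embedding_matrix, transpose_b=True)
logits = tf.nn.bias_add(tf.reshape(logits, (batch, seq, vocab_size)), bias)
print(logits.shape)  # (2, 3, 6): one score per vocabulary entry and position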
1239
+ @add_start_docstrings(
1240
+ """
1241
+ Electra model with a language modeling head on top.
1242
+
1243
+ Even though both the discriminator and generator may be loaded into this model, the generator is the only model of
1244
+ the two to have been trained for the masked language modeling task.
1245
+ """,
1246
+ ELECTRA_START_DOCSTRING,
1247
+ )
1248
+ class TFElectraForMaskedLM(TFElectraPreTrainedModel, TFMaskedLanguageModelingLoss):
1249
+ def __init__(self, config, **kwargs):
1250
+ super().__init__(config, **kwargs)
1251
+
1252
+ self.config = config
1253
+ self.electra = TFElectraMainLayer(config, name="electra")
1254
+ self.generator_predictions = TFElectraGeneratorPredictions(config, name="generator_predictions")
1255
+
1256
+ if isinstance(config.hidden_act, str):
1257
+ self.activation = get_tf_activation(config.hidden_act)
1258
+ else:
1259
+ self.activation = config.hidden_act
1260
+
1261
+ self.generator_lm_head = TFElectraMaskedLMHead(config, self.electra.embeddings, name="generator_lm_head")
1262
+
1263
+ def get_lm_head(self):
1264
+ return self.generator_lm_head
1265
+
1266
+ def get_prefix_bias_name(self):
1267
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1268
+ return self.name + "/" + self.generator_lm_head.name
1269
+
1270
+ @unpack_inputs
1271
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1272
+ @add_code_sample_docstrings(
1273
+ checkpoint="google/electra-small-generator",
1274
+ output_type=TFMaskedLMOutput,
1275
+ config_class=_CONFIG_FOR_DOC,
1276
+ mask="[MASK]",
1277
+ expected_output="'paris'",
1278
+ expected_loss=1.22,
1279
+ )
1280
+ def call(
1281
+ self,
1282
+ input_ids: TFModelInputType | None = None,
1283
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1284
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1285
+ position_ids: np.ndarray | tf.Tensor | None = None,
1286
+ head_mask: np.ndarray | tf.Tensor | None = None,
1287
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1288
+ output_attentions: Optional[bool] = None,
1289
+ output_hidden_states: Optional[bool] = None,
1290
+ return_dict: Optional[bool] = None,
1291
+ labels: np.ndarray | tf.Tensor | None = None,
1292
+ training: Optional[bool] = False,
1293
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
1294
+ r"""
1295
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1296
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1297
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1298
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1299
+ """
1300
+ generator_hidden_states = self.electra(
1301
+ input_ids=input_ids,
1302
+ attention_mask=attention_mask,
1303
+ token_type_ids=token_type_ids,
1304
+ position_ids=position_ids,
1305
+ head_mask=head_mask,
1306
+ inputs_embeds=inputs_embeds,
1307
+ output_attentions=output_attentions,
1308
+ output_hidden_states=output_hidden_states,
1309
+ return_dict=return_dict,
1310
+ training=training,
1311
+ )
1312
+ generator_sequence_output = generator_hidden_states[0]
1313
+ prediction_scores = self.generator_predictions(generator_sequence_output, training=training)
1314
+ prediction_scores = self.generator_lm_head(prediction_scores, training=training)
1315
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
1316
+
1317
+ if not return_dict:
1318
+ output = (prediction_scores,) + generator_hidden_states[1:]
1319
+
1320
+ return ((loss,) + output) if loss is not None else output
1321
+
1322
+ return TFMaskedLMOutput(
1323
+ loss=loss,
1324
+ logits=prediction_scores,
1325
+ hidden_states=generator_hidden_states.hidden_states,
1326
+ attentions=generator_hidden_states.attentions,
1327
+ )
1328
+
1329
+ def build(self, input_shape=None):
1330
+ if self.built:
1331
+ return
1332
+ self.built = True
1333
+ if getattr(self, "electra", None) is not None:
1334
+ with tf.name_scope(self.electra.name):
1335
+ self.electra.build(None)
1336
+ if getattr(self, "generator_predictions", None) is not None:
1337
+ with tf.name_scope(self.generator_predictions.name):
1338
+ self.generator_predictions.build(None)
1339
+ if getattr(self, "generator_lm_head", None) is not None:
1340
+ with tf.name_scope(self.generator_lm_head.name):
1341
+ self.generator_lm_head.build(None)
1342
+
1343
+
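A usage sketch for the masked-LM head, mirroring the generator checkpoint named in the code-sample decorator above (assumes `google/electra-small-generator` can be downloaded):

import tensorflow as tf
from transformers import AutoTokenizer, TFElectraForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-generator")
model = TFElectraForMaskedLM.from_pretrained("google/electra-small-generator")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
logits = model(**inputs).logits

mask_pos = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0])
predicted_id = int(tf.argmax(logits[0, mask_pos]))
print(tokenizer.decode([predicted_id]))  # expected to be something like "paris"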
1344
+ class TFElectraClassificationHead(keras.layers.Layer):
1345
+ """Head for sentence-level classification tasks."""
1346
+
1347
+ def __init__(self, config, **kwargs):
1348
+ super().__init__(**kwargs)
1349
+
1350
+ self.dense = keras.layers.Dense(
1351
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
1352
+ )
1353
+ classifier_dropout = (
1354
+ config.classifier_dropout
1355
+ if config.classifier_dropout is not None
1356
+ else config.hidden_dropout_prob
1357
+ )
1358
+ self.dropout = keras.layers.Dropout(classifier_dropout)
1359
+ self.out_proj = keras.layers.Dense(
1360
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
1361
+ )
1362
+ self.config = config
1363
+
1364
+ def call(self, inputs, **kwargs):
1365
+ x = inputs[:, 0, :] # take <s> token (equiv. to [CLS])
1366
+ x = self.dropout(x)
1367
+ x = self.dense(x)
1368
+ x = get_tf_activation("gelu")(x) # although BERT uses tanh here, it seems Electra authors used gelu here
1369
+ x = self.dropout(x)
1370
+ x = self.out_proj(x)
1371
+
1372
+ return x
1373
+
1374
+ def build(self, input_shape=None):
1375
+ if self.built:
1376
+ return
1377
+ self.built = True
1378
+ if getattr(self, "dense", None) is not None:
1379
+ with tf.name_scope(self.dense.name):
1380
+ self.dense.build([None, None, self.config.hidden_size])
1381
+ if getattr(self, "out_proj", None) is not None:
1382
+ with tf.name_scope(self.out_proj.name):
1383
+ self.out_proj.build([None, None, self.config.hidden_size])
1384
+
1385
+
1386
+ @add_start_docstrings(
1387
+ """
1388
+ ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1389
+ pooled output) e.g. for GLUE tasks.
1390
+ """,
1391
+ ELECTRA_START_DOCSTRING,
1392
+ )
1393
+ class TFElectraForSequenceClassification(TFElectraPreTrainedModel, TFSequenceClassificationLoss):
1394
+ def __init__(self, config, *inputs, **kwargs):
1395
+ super().__init__(config, *inputs, **kwargs)
1396
+ self.num_labels = config.num_labels
1397
+ self.electra = TFElectraMainLayer(config, name="electra")
1398
+ self.classifier = TFElectraClassificationHead(config, name="classifier")
1399
+
1400
+ @unpack_inputs
1401
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1402
+ @add_code_sample_docstrings(
1403
+ checkpoint="bhadresh-savani/electra-base-emotion",
1404
+ output_type=TFSequenceClassifierOutput,
1405
+ config_class=_CONFIG_FOR_DOC,
1406
+ expected_output="'joy'",
1407
+ expected_loss=0.06,
1408
+ )
1409
+ def call(
1410
+ self,
1411
+ input_ids: TFModelInputType | None = None,
1412
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1413
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1414
+ position_ids: np.ndarray | tf.Tensor | None = None,
1415
+ head_mask: np.ndarray | tf.Tensor | None = None,
1416
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1417
+ output_attentions: Optional[bool] = None,
1418
+ output_hidden_states: Optional[bool] = None,
1419
+ return_dict: Optional[bool] = None,
1420
+ labels: np.ndarray | tf.Tensor | None = None,
1421
+ training: Optional[bool] = False,
1422
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
1423
+ r"""
1424
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1425
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1426
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
1427
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1428
+ """
1429
+ outputs = self.electra(
1430
+ input_ids=input_ids,
1431
+ attention_mask=attention_mask,
1432
+ token_type_ids=token_type_ids,
1433
+ position_ids=position_ids,
1434
+ head_mask=head_mask,
1435
+ inputs_embeds=inputs_embeds,
1436
+ output_attentions=output_attentions,
1437
+ output_hidden_states=output_hidden_states,
1438
+ return_dict=return_dict,
1439
+ training=training,
1440
+ )
1441
+ logits = self.classifier(outputs[0])
1442
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1443
+
1444
+ if not return_dict:
1445
+ output = (logits,) + outputs[1:]
1446
+
1447
+ return ((loss,) + output) if loss is not None else output
1448
+
1449
+ return TFSequenceClassifierOutput(
1450
+ loss=loss,
1451
+ logits=logits,
1452
+ hidden_states=outputs.hidden_states,
1453
+ attentions=outputs.attentions,
1454
+ )
1455
+
1456
+ def build(self, input_shape=None):
1457
+ if self.built:
1458
+ return
1459
+ self.built = True
1460
+ if getattr(self, "electra", None) is not None:
1461
+ with tf.name_scope(self.electra.name):
1462
+ self.electra.build(None)
1463
+ if getattr(self, "classifier", None) is not None:
1464
+ with tf.name_scope(self.classifier.name):
1465
+ self.classifier.build(None)
1466
+
1467
+
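A minimal usage sketch for this sequence-classification head, using the same `bhadresh-savani/electra-base-emotion` checkpoint referenced in the docstring decorator above:

import tensorflow as tf
from transformers import AutoTokenizer, TFElectraForSequenceClassification

checkpoint = "bhadresh-savani/electra-base-emotion"  # same checkpoint as in the docstring above
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFElectraForSequenceClassification.from_pretrained(checkpoint)

inputs = tokenizer("I am thrilled with how this turned out!", return_tensors="tf")
logits = model(**inputs).logits  # shape (batch_size, num_labels)
predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_class_id])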
1468
+ @add_start_docstrings(
1469
+ """
1470
+ ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1471
+ softmax) e.g. for RocStories/SWAG tasks.
1472
+ """,
1473
+ ELECTRA_START_DOCSTRING,
1474
+ )
1475
+ class TFElectraForMultipleChoice(TFElectraPreTrainedModel, TFMultipleChoiceLoss):
1476
+ def __init__(self, config, *inputs, **kwargs):
1477
+ super().__init__(config, *inputs, **kwargs)
1478
+
1479
+ self.electra = TFElectraMainLayer(config, name="electra")
1480
+ self.sequence_summary = TFSequenceSummary(
1481
+ config, initializer_range=config.initializer_range, name="sequence_summary"
1482
+ )
1483
+ self.classifier = keras.layers.Dense(
1484
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1485
+ )
1486
+ self.config = config
1487
+
1488
+ @unpack_inputs
1489
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1490
+ @add_code_sample_docstrings(
1491
+ checkpoint=_CHECKPOINT_FOR_DOC,
1492
+ output_type=TFMultipleChoiceModelOutput,
1493
+ config_class=_CONFIG_FOR_DOC,
1494
+ )
1495
+ def call(
1496
+ self,
1497
+ input_ids: TFModelInputType | None = None,
1498
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1499
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1500
+ position_ids: np.ndarray | tf.Tensor | None = None,
1501
+ head_mask: np.ndarray | tf.Tensor | None = None,
1502
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1503
+ output_attentions: Optional[bool] = None,
1504
+ output_hidden_states: Optional[bool] = None,
1505
+ return_dict: Optional[bool] = None,
1506
+ labels: np.ndarray | tf.Tensor | None = None,
1507
+ training: Optional[bool] = False,
1508
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
1509
+ r"""
1510
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1511
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]`
1512
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
1513
+ """
1514
+
1515
+ if input_ids is not None:
1516
+ num_choices = shape_list(input_ids)[1]
1517
+ seq_length = shape_list(input_ids)[2]
1518
+ else:
1519
+ num_choices = shape_list(inputs_embeds)[1]
1520
+ seq_length = shape_list(inputs_embeds)[2]
1521
+
1522
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
1523
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
1524
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
1525
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
1526
+ flat_inputs_embeds = (
1527
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
1528
+ if inputs_embeds is not None
1529
+ else None
1530
+ )
1531
+ outputs = self.electra(
1532
+ input_ids=flat_input_ids,
1533
+ attention_mask=flat_attention_mask,
1534
+ token_type_ids=flat_token_type_ids,
1535
+ position_ids=flat_position_ids,
1536
+ head_mask=head_mask,
1537
+ inputs_embeds=flat_inputs_embeds,
1538
+ output_attentions=output_attentions,
1539
+ output_hidden_states=output_hidden_states,
1540
+ return_dict=return_dict,
1541
+ training=training,
1542
+ )
1543
+ logits = self.sequence_summary(outputs[0])
1544
+ logits = self.classifier(logits)
1545
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
1546
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
1547
+
1548
+ if not return_dict:
1549
+ output = (reshaped_logits,) + outputs[1:]
1550
+
1551
+ return ((loss,) + output) if loss is not None else output
1552
+
1553
+ return TFMultipleChoiceModelOutput(
1554
+ loss=loss,
1555
+ logits=reshaped_logits,
1556
+ hidden_states=outputs.hidden_states,
1557
+ attentions=outputs.attentions,
1558
+ )
1559
+
1560
+ def build(self, input_shape=None):
1561
+ if self.built:
1562
+ return
1563
+ self.built = True
1564
+ if getattr(self, "electra", None) is not None:
1565
+ with tf.name_scope(self.electra.name):
1566
+ self.electra.build(None)
1567
+ if getattr(self, "sequence_summary", None) is not None:
1568
+ with tf.name_scope(self.sequence_summary.name):
1569
+ self.sequence_summary.build(None)
1570
+ if getattr(self, "classifier", None) is not None:
1571
+ with tf.name_scope(self.classifier.name):
1572
+ self.classifier.build([None, None, self.config.hidden_size])
1573
+
1574
+
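A minimal usage sketch for the multiple-choice head, assuming the `google/electra-small-discriminator` checkpoint (not referenced in this class); a head fine-tuned on SWAG-style data would be needed for meaningful scores:

import tensorflow as tf
from transformers import AutoTokenizer, TFElectraForMultipleChoice

checkpoint = "google/electra-small-discriminator"  # assumed base checkpoint
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFElectraForMultipleChoice.from_pretrained(checkpoint)

prompt = "The weather today is"
choices = ["sunny and warm.", "a kind of pasta."]

# encode each (prompt, choice) pair, then add the batch dimension expected by the model:
# (batch_size, num_choices, sequence_length)
encoding = tokenizer([prompt, prompt], choices, return_tensors="tf", padding=True)
inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()}
outputs = model(inputs)
print(int(tf.math.argmax(outputs.logits, axis=-1)[0]))  # index of the preferred choice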
1575
+ @add_start_docstrings(
1576
+ """
1577
+ Electra model with a token classification head on top.
1578
+
1579
+ Both the discriminator and generator may be loaded into this model.
1580
+ """,
1581
+ ELECTRA_START_DOCSTRING,
1582
+ )
1583
+ class TFElectraForTokenClassification(TFElectraPreTrainedModel, TFTokenClassificationLoss):
1584
+ def __init__(self, config, **kwargs):
1585
+ super().__init__(config, **kwargs)
1586
+
1587
+ self.electra = TFElectraMainLayer(config, name="electra")
1588
+ classifier_dropout = (
1589
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1590
+ )
1591
+ self.dropout = keras.layers.Dropout(classifier_dropout)
1592
+ self.classifier = keras.layers.Dense(
1593
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1594
+ )
1595
+ self.config = config
1596
+
1597
+ @unpack_inputs
1598
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1599
+ @add_code_sample_docstrings(
1600
+ checkpoint="bhadresh-savani/electra-base-discriminator-finetuned-conll03-english",
1601
+ output_type=TFTokenClassifierOutput,
1602
+ config_class=_CONFIG_FOR_DOC,
1603
+ expected_output="['B-LOC', 'B-ORG', 'O', 'O', 'O', 'O', 'O', 'B-LOC', 'O', 'B-LOC', 'I-LOC']",
1604
+ expected_loss=0.11,
1605
+ )
1606
+ def call(
1607
+ self,
1608
+ input_ids: TFModelInputType | None = None,
1609
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1610
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1611
+ position_ids: np.ndarray | tf.Tensor | None = None,
1612
+ head_mask: np.ndarray | tf.Tensor | None = None,
1613
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1614
+ output_attentions: Optional[bool] = None,
1615
+ output_hidden_states: Optional[bool] = None,
1616
+ return_dict: Optional[bool] = None,
1617
+ labels: np.ndarray | tf.Tensor | None = None,
1618
+ training: Optional[bool] = False,
1619
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1620
+ r"""
1621
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1622
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1623
+ """
1624
+ discriminator_hidden_states = self.electra(
1625
+ input_ids=input_ids,
1626
+ attention_mask=attention_mask,
1627
+ token_type_ids=token_type_ids,
1628
+ position_ids=position_ids,
1629
+ head_mask=head_mask,
1630
+ inputs_embeds=inputs_embeds,
1631
+ output_attentions=output_attentions,
1632
+ output_hidden_states=output_hidden_states,
1633
+ return_dict=return_dict,
1634
+ training=training,
1635
+ )
1636
+ discriminator_sequence_output = discriminator_hidden_states[0]
1637
+ discriminator_sequence_output = self.dropout(discriminator_sequence_output)
1638
+ logits = self.classifier(discriminator_sequence_output)
1639
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1640
+
1641
+ if not return_dict:
1642
+ output = (logits,) + discriminator_hidden_states[1:]
1643
+
1644
+ return ((loss,) + output) if loss is not None else output
1645
+
1646
+ return TFTokenClassifierOutput(
1647
+ loss=loss,
1648
+ logits=logits,
1649
+ hidden_states=discriminator_hidden_states.hidden_states,
1650
+ attentions=discriminator_hidden_states.attentions,
1651
+ )
1652
+
1653
+ def build(self, input_shape=None):
1654
+ if self.built:
1655
+ return
1656
+ self.built = True
1657
+ if getattr(self, "electra", None) is not None:
1658
+ with tf.name_scope(self.electra.name):
1659
+ self.electra.build(None)
1660
+ if getattr(self, "classifier", None) is not None:
1661
+ with tf.name_scope(self.classifier.name):
1662
+ self.classifier.build([None, None, self.config.hidden_size])
1663
+
1664
+
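A minimal usage sketch for the token-classification head, using the CoNLL-03 checkpoint referenced in the docstring decorator below:

import tensorflow as tf
from transformers import AutoTokenizer, TFElectraForTokenClassification

checkpoint = "bhadresh-savani/electra-base-discriminator-finetuned-conll03-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFElectraForTokenClassification.from_pretrained(checkpoint)

inputs = tokenizer("HuggingFace is based in Paris and New York", return_tensors="tf")
logits = model(**inputs).logits  # (batch_size, sequence_length, num_labels)
predicted_ids = tf.math.argmax(logits, axis=-1)[0]
print([model.config.id2label[int(i)] for i in predicted_ids])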
1665
+ @add_start_docstrings(
1666
+ """
1667
+ Electra Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
1668
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1669
+ """,
1670
+ ELECTRA_START_DOCSTRING,
1671
+ )
1672
+ class TFElectraForQuestionAnswering(TFElectraPreTrainedModel, TFQuestionAnsweringLoss):
1673
+ def __init__(self, config, *inputs, **kwargs):
1674
+ super().__init__(config, *inputs, **kwargs)
1675
+
1676
+ self.num_labels = config.num_labels
1677
+ self.electra = TFElectraMainLayer(config, name="electra")
1678
+ self.qa_outputs = keras.layers.Dense(
1679
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1680
+ )
1681
+ self.config = config
1682
+
1683
+ @unpack_inputs
1684
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1685
+ @add_code_sample_docstrings(
1686
+ checkpoint="bhadresh-savani/electra-base-squad2",
1687
+ output_type=TFQuestionAnsweringModelOutput,
1688
+ config_class=_CONFIG_FOR_DOC,
1689
+ qa_target_start_index=11,
1690
+ qa_target_end_index=12,
1691
+ expected_output="'a nice puppet'",
1692
+ expected_loss=2.64,
1693
+ )
1694
+ def call(
1695
+ self,
1696
+ input_ids: TFModelInputType | None = None,
1697
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1698
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1699
+ position_ids: np.ndarray | tf.Tensor | None = None,
1700
+ head_mask: np.ndarray | tf.Tensor | None = None,
1701
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1702
+ output_attentions: Optional[bool] = None,
1703
+ output_hidden_states: Optional[bool] = None,
1704
+ return_dict: Optional[bool] = None,
1705
+ start_positions: np.ndarray | tf.Tensor | None = None,
1706
+ end_positions: np.ndarray | tf.Tensor | None = None,
1707
+ training: Optional[bool] = False,
1708
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1709
+ r"""
1710
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1711
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1712
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1713
+ are not taken into account for computing the loss.
1714
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1715
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1716
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1717
+ are not taken into account for computing the loss.
1718
+ """
1719
+ discriminator_hidden_states = self.electra(
1720
+ input_ids=input_ids,
1721
+ attention_mask=attention_mask,
1722
+ token_type_ids=token_type_ids,
1723
+ position_ids=position_ids,
1724
+ head_mask=head_mask,
1725
+ inputs_embeds=inputs_embeds,
1726
+ output_attentions=output_attentions,
1727
+ output_hidden_states=output_hidden_states,
1728
+ return_dict=return_dict,
1729
+ training=training,
1730
+ )
1731
+ discriminator_sequence_output = discriminator_hidden_states[0]
1732
+ logits = self.qa_outputs(discriminator_sequence_output)
1733
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
1734
+ start_logits = tf.squeeze(start_logits, axis=-1)
1735
+ end_logits = tf.squeeze(end_logits, axis=-1)
1736
+ loss = None
1737
+
1738
+ if start_positions is not None and end_positions is not None:
1739
+ labels = {"start_position": start_positions}
1740
+ labels["end_position"] = end_positions
1741
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
1742
+
1743
+ if not return_dict:
1744
+ output = (
1745
+ start_logits,
1746
+ end_logits,
1747
+ ) + discriminator_hidden_states[1:]
1748
+
1749
+ return ((loss,) + output) if loss is not None else output
1750
+
1751
+ return TFQuestionAnsweringModelOutput(
1752
+ loss=loss,
1753
+ start_logits=start_logits,
1754
+ end_logits=end_logits,
1755
+ hidden_states=discriminator_hidden_states.hidden_states,
1756
+ attentions=discriminator_hidden_states.attentions,
1757
+ )
1758
+
1759
+ def build(self, input_shape=None):
1760
+ if self.built:
1761
+ return
1762
+ self.built = True
1763
+ if getattr(self, "electra", None) is not None:
1764
+ with tf.name_scope(self.electra.name):
1765
+ self.electra.build(None)
1766
+ if getattr(self, "qa_outputs", None) is not None:
1767
+ with tf.name_scope(self.qa_outputs.name):
1768
+ self.qa_outputs.build([None, None, self.config.hidden_size])
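A minimal usage sketch for the question-answering head, using the `bhadresh-savani/electra-base-squad2` checkpoint referenced in the docstring decorator above:

import tensorflow as tf
from transformers import AutoTokenizer, TFElectraForQuestionAnswering

checkpoint = "bhadresh-savani/electra-base-squad2"  # same checkpoint as in the docstring above
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFElectraForQuestionAnswering.from_pretrained(checkpoint)

question = "Who wrote the play?"
context = "The play was written by William Shakespeare in the early 1600s."
inputs = tokenizer(question, context, return_tensors="tf")
outputs = model(**inputs)

# the answer span runs from the argmax of the start logits to the argmax of the end logits
start = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))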
llmeval-env/lib/python3.10/site-packages/transformers/models/electra/tokenization_electra.py ADDED
@@ -0,0 +1,503 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import collections
17
+ import os
18
+ import unicodedata
19
+ from typing import List, Optional, Tuple
20
+
21
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
28
+
29
+
30
+ # Copied from transformers.models.bert.tokenization_bert.load_vocab
31
+ def load_vocab(vocab_file):
32
+ """Loads a vocabulary file into a dictionary."""
33
+ vocab = collections.OrderedDict()
34
+ with open(vocab_file, "r", encoding="utf-8") as reader:
35
+ tokens = reader.readlines()
36
+ for index, token in enumerate(tokens):
37
+ token = token.rstrip("\n")
38
+ vocab[token] = index
39
+ return vocab
40
+
41
+
42
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
43
+ def whitespace_tokenize(text):
44
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
45
+ text = text.strip()
46
+ if not text:
47
+ return []
48
+ tokens = text.split()
49
+ return tokens
50
+
51
+
52
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer with Bert->Electra,BERT->Electra
53
+ class ElectraTokenizer(PreTrainedTokenizer):
54
+ r"""
55
+ Construct an Electra tokenizer. Based on WordPiece.
56
+
57
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
58
+ this superclass for more information regarding those methods.
59
+
60
+ Args:
61
+ vocab_file (`str`):
62
+ File containing the vocabulary.
63
+ do_lower_case (`bool`, *optional*, defaults to `True`):
64
+ Whether or not to lowercase the input when tokenizing.
65
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
66
+ Whether or not to do basic tokenization before WordPiece.
67
+ never_split (`Iterable`, *optional*):
68
+ Collection of tokens which will never be split during tokenization. Only has an effect when
69
+ `do_basic_tokenize=True`
70
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
71
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
72
+ token instead.
73
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
74
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
75
+ sequence classification or for a text and a question for question answering. It is also used as the last
76
+ token of a sequence built with special tokens.
77
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
78
+ The token used for padding, for example when batching sequences of different lengths.
79
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
80
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
81
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
82
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
83
+ The token used for masking values. This is the token used when training this model with masked language
84
+ modeling. This is the token which the model will try to predict.
85
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
86
+ Whether or not to tokenize Chinese characters.
87
+
88
+ This should likely be deactivated for Japanese (see this
89
+ [issue](https://github.com/huggingface/transformers/issues/328)).
90
+ strip_accents (`bool`, *optional*):
91
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
92
+ value for `lowercase` (as in the original Electra).
93
+ """
94
+
95
+ vocab_files_names = VOCAB_FILES_NAMES
96
+
97
+ def __init__(
98
+ self,
99
+ vocab_file,
100
+ do_lower_case=True,
101
+ do_basic_tokenize=True,
102
+ never_split=None,
103
+ unk_token="[UNK]",
104
+ sep_token="[SEP]",
105
+ pad_token="[PAD]",
106
+ cls_token="[CLS]",
107
+ mask_token="[MASK]",
108
+ tokenize_chinese_chars=True,
109
+ strip_accents=None,
110
+ **kwargs,
111
+ ):
112
+ if not os.path.isfile(vocab_file):
113
+ raise ValueError(
114
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
115
+ " model use `tokenizer = ElectraTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
116
+ )
117
+ self.vocab = load_vocab(vocab_file)
118
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
119
+ self.do_basic_tokenize = do_basic_tokenize
120
+ if do_basic_tokenize:
121
+ self.basic_tokenizer = BasicTokenizer(
122
+ do_lower_case=do_lower_case,
123
+ never_split=never_split,
124
+ tokenize_chinese_chars=tokenize_chinese_chars,
125
+ strip_accents=strip_accents,
126
+ )
127
+
128
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
129
+
130
+ super().__init__(
131
+ do_lower_case=do_lower_case,
132
+ do_basic_tokenize=do_basic_tokenize,
133
+ never_split=never_split,
134
+ unk_token=unk_token,
135
+ sep_token=sep_token,
136
+ pad_token=pad_token,
137
+ cls_token=cls_token,
138
+ mask_token=mask_token,
139
+ tokenize_chinese_chars=tokenize_chinese_chars,
140
+ strip_accents=strip_accents,
141
+ **kwargs,
142
+ )
143
+
144
+ @property
145
+ def do_lower_case(self):
146
+ return self.basic_tokenizer.do_lower_case
147
+
148
+ @property
149
+ def vocab_size(self):
150
+ return len(self.vocab)
151
+
152
+ def get_vocab(self):
153
+ return dict(self.vocab, **self.added_tokens_encoder)
154
+
155
+ def _tokenize(self, text, split_special_tokens=False):
156
+ split_tokens = []
157
+ if self.do_basic_tokenize:
158
+ for token in self.basic_tokenizer.tokenize(
159
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
160
+ ):
161
+ # If the token is part of the never_split set
162
+ if token in self.basic_tokenizer.never_split:
163
+ split_tokens.append(token)
164
+ else:
165
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
166
+ else:
167
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
168
+ return split_tokens
169
+
170
+ def _convert_token_to_id(self, token):
171
+ """Converts a token (str) in an id using the vocab."""
172
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
173
+
174
+ def _convert_id_to_token(self, index):
175
+ """Converts an index (integer) in a token (str) using the vocab."""
176
+ return self.ids_to_tokens.get(index, self.unk_token)
177
+
178
+ def convert_tokens_to_string(self, tokens):
179
+ """Converts a sequence of tokens (string) in a single string."""
180
+ out_string = " ".join(tokens).replace(" ##", "").strip()
181
+ return out_string
182
+
183
+ def build_inputs_with_special_tokens(
184
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
185
+ ) -> List[int]:
186
+ """
187
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
188
+ adding special tokens. An Electra sequence has the following format:
189
+
190
+ - single sequence: `[CLS] X [SEP]`
191
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
192
+
193
+ Args:
194
+ token_ids_0 (`List[int]`):
195
+ List of IDs to which the special tokens will be added.
196
+ token_ids_1 (`List[int]`, *optional*):
197
+ Optional second list of IDs for sequence pairs.
198
+
199
+ Returns:
200
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
201
+ """
202
+ if token_ids_1 is None:
203
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
204
+ cls = [self.cls_token_id]
205
+ sep = [self.sep_token_id]
206
+ return cls + token_ids_0 + sep + token_ids_1 + sep
207
+
208
+ def get_special_tokens_mask(
209
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
210
+ ) -> List[int]:
211
+ """
212
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
213
+ special tokens using the tokenizer `prepare_for_model` method.
214
+
215
+ Args:
216
+ token_ids_0 (`List[int]`):
217
+ List of IDs.
218
+ token_ids_1 (`List[int]`, *optional*):
219
+ Optional second list of IDs for sequence pairs.
220
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
221
+ Whether or not the token list is already formatted with special tokens for the model.
222
+
223
+ Returns:
224
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
225
+ """
226
+
227
+ if already_has_special_tokens:
228
+ return super().get_special_tokens_mask(
229
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
230
+ )
231
+
232
+ if token_ids_1 is not None:
233
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
234
+ return [1] + ([0] * len(token_ids_0)) + [1]
235
+
236
+ def create_token_type_ids_from_sequences(
237
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
238
+ ) -> List[int]:
239
+ """
240
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An Electra sequence
241
+ pair mask has the following format:
242
+
243
+ ```
244
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
245
+ | first sequence | second sequence |
246
+ ```
247
+
248
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
249
+
250
+ Args:
251
+ token_ids_0 (`List[int]`):
252
+ List of IDs.
253
+ token_ids_1 (`List[int]`, *optional*):
254
+ Optional second list of IDs for sequence pairs.
255
+
256
+ Returns:
257
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
258
+ """
259
+ sep = [self.sep_token_id]
260
+ cls = [self.cls_token_id]
261
+ if token_ids_1 is None:
262
+ return len(cls + token_ids_0 + sep) * [0]
263
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
264
+
265
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
266
+ index = 0
267
+ if os.path.isdir(save_directory):
268
+ vocab_file = os.path.join(
269
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
270
+ )
271
+ else:
272
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
273
+ with open(vocab_file, "w", encoding="utf-8") as writer:
274
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
275
+ if index != token_index:
276
+ logger.warning(
277
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
278
+ " Please check that the vocabulary is not corrupted!"
279
+ )
280
+ index = token_index
281
+ writer.write(token + "\n")
282
+ index += 1
283
+ return (vocab_file,)
284
+
285
+
286
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
287
+ class BasicTokenizer(object):
288
+ """
289
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
290
+
291
+ Args:
292
+ do_lower_case (`bool`, *optional*, defaults to `True`):
293
+ Whether or not to lowercase the input when tokenizing.
294
+ never_split (`Iterable`, *optional*):
295
+ Collection of tokens which will never be split during tokenization. Only has an effect when
296
+ `do_basic_tokenize=True`
297
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
298
+ Whether or not to tokenize Chinese characters.
299
+
300
+ This should likely be deactivated for Japanese (see this
301
+ [issue](https://github.com/huggingface/transformers/issues/328)).
302
+ strip_accents (`bool`, *optional*):
303
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
304
+ value for `lowercase` (as in the original BERT).
305
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
306
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
307
+ the full context of the words, such as contractions.
308
+ """
309
+
310
+ def __init__(
311
+ self,
312
+ do_lower_case=True,
313
+ never_split=None,
314
+ tokenize_chinese_chars=True,
315
+ strip_accents=None,
316
+ do_split_on_punc=True,
317
+ ):
318
+ if never_split is None:
319
+ never_split = []
320
+ self.do_lower_case = do_lower_case
321
+ self.never_split = set(never_split)
322
+ self.tokenize_chinese_chars = tokenize_chinese_chars
323
+ self.strip_accents = strip_accents
324
+ self.do_split_on_punc = do_split_on_punc
325
+
326
+ def tokenize(self, text, never_split=None):
327
+ """
328
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
329
+
330
+ Args:
331
+ never_split (`List[str]`, *optional*)
332
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
333
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
334
+ """
335
+ # union() returns a new set by concatenating the two sets.
336
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
337
+ text = self._clean_text(text)
338
+
339
+ # This was added on November 1st, 2018 for the multilingual and Chinese
340
+ # models. This is also applied to the English models now, but it doesn't
341
+ # matter since the English models were not trained on any Chinese data
342
+ # and generally don't have any Chinese data in them (there are Chinese
343
+ # characters in the vocabulary because Wikipedia does have some Chinese
344
+ # words in the English Wikipedia.).
345
+ if self.tokenize_chinese_chars:
346
+ text = self._tokenize_chinese_chars(text)
347
+ # prevents treating the same character with different unicode codepoints as different characters
348
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
349
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
350
+ split_tokens = []
351
+ for token in orig_tokens:
352
+ if token not in never_split:
353
+ if self.do_lower_case:
354
+ token = token.lower()
355
+ if self.strip_accents is not False:
356
+ token = self._run_strip_accents(token)
357
+ elif self.strip_accents:
358
+ token = self._run_strip_accents(token)
359
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
360
+
361
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
362
+ return output_tokens
363
+
364
+ def _run_strip_accents(self, text):
365
+ """Strips accents from a piece of text."""
366
+ text = unicodedata.normalize("NFD", text)
367
+ output = []
368
+ for char in text:
369
+ cat = unicodedata.category(char)
370
+ if cat == "Mn":
371
+ continue
372
+ output.append(char)
373
+ return "".join(output)
374
+
375
+ def _run_split_on_punc(self, text, never_split=None):
376
+ """Splits punctuation on a piece of text."""
377
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
378
+ return [text]
379
+ chars = list(text)
380
+ i = 0
381
+ start_new_word = True
382
+ output = []
383
+ while i < len(chars):
384
+ char = chars[i]
385
+ if _is_punctuation(char):
386
+ output.append([char])
387
+ start_new_word = True
388
+ else:
389
+ if start_new_word:
390
+ output.append([])
391
+ start_new_word = False
392
+ output[-1].append(char)
393
+ i += 1
394
+
395
+ return ["".join(x) for x in output]
396
+
397
+ def _tokenize_chinese_chars(self, text):
398
+ """Adds whitespace around any CJK character."""
399
+ output = []
400
+ for char in text:
401
+ cp = ord(char)
402
+ if self._is_chinese_char(cp):
403
+ output.append(" ")
404
+ output.append(char)
405
+ output.append(" ")
406
+ else:
407
+ output.append(char)
408
+ return "".join(output)
409
+
410
+ def _is_chinese_char(self, cp):
411
+ """Checks whether CP is the codepoint of a CJK character."""
412
+ # This defines a "chinese character" as anything in the CJK Unicode block:
413
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
414
+ #
415
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
416
+ # despite its name. The modern Korean Hangul alphabet is a different block,
417
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
418
+ # space-separated words, so they are not treated specially and handled
419
+ like all of the other languages.
420
+ if (
421
+ (cp >= 0x4E00 and cp <= 0x9FFF)
422
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
423
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
424
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
425
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
426
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
427
+ or (cp >= 0xF900 and cp <= 0xFAFF)
428
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
429
+ ): #
430
+ return True
431
+
432
+ return False
433
+
434
+ def _clean_text(self, text):
435
+ """Performs invalid character removal and whitespace cleanup on text."""
436
+ output = []
437
+ for char in text:
438
+ cp = ord(char)
439
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
440
+ continue
441
+ if _is_whitespace(char):
442
+ output.append(" ")
443
+ else:
444
+ output.append(char)
445
+ return "".join(output)
446
+
447
+
448
+ # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
449
+ class WordpieceTokenizer(object):
450
+ """Runs WordPiece tokenization."""
451
+
452
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
453
+ self.vocab = vocab
454
+ self.unk_token = unk_token
455
+ self.max_input_chars_per_word = max_input_chars_per_word
456
+
457
+ def tokenize(self, text):
458
+ """
459
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
460
+ tokenization using the given vocabulary.
461
+
462
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
463
+
464
+ Args:
465
+ text: A single token or whitespace separated tokens. This should have
466
+ already been passed through *BasicTokenizer*.
467
+
468
+ Returns:
469
+ A list of wordpiece tokens.
470
+ """
471
+
472
+ output_tokens = []
473
+ for token in whitespace_tokenize(text):
474
+ chars = list(token)
475
+ if len(chars) > self.max_input_chars_per_word:
476
+ output_tokens.append(self.unk_token)
477
+ continue
478
+
479
+ is_bad = False
480
+ start = 0
481
+ sub_tokens = []
482
+ while start < len(chars):
483
+ end = len(chars)
484
+ cur_substr = None
485
+ while start < end:
486
+ substr = "".join(chars[start:end])
487
+ if start > 0:
488
+ substr = "##" + substr
489
+ if substr in self.vocab:
490
+ cur_substr = substr
491
+ break
492
+ end -= 1
493
+ if cur_substr is None:
494
+ is_bad = True
495
+ break
496
+ sub_tokens.append(cur_substr)
497
+ start = end
498
+
499
+ if is_bad:
500
+ output_tokens.append(self.unk_token)
501
+ else:
502
+ output_tokens.extend(sub_tokens)
503
+ return output_tokens
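A minimal usage sketch for `ElectraTokenizer`, assuming the `google/electra-small-discriminator` checkpoint (not referenced in this file); it illustrates the `[CLS] A [SEP] B [SEP]` layout and the token type ids described in the docstrings above:

from transformers import ElectraTokenizer

# assumed checkpoint; any ELECTRA vocabulary behaves the same way
tokenizer = ElectraTokenizer.from_pretrained("google/electra-small-discriminator")

encoded = tokenizer("How are you?", "I am fine, thanks.")
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))  # [CLS] A [SEP] B [SEP]
print(encoded["token_type_ids"])                              # 0s for the first segment, 1s for the second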
llmeval-env/lib/python3.10/site-packages/transformers/models/electra/tokenization_electra_fast.py ADDED
@@ -0,0 +1,169 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import json
17
+ from typing import List, Optional, Tuple
18
+
19
+ from tokenizers import normalizers
20
+
21
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
22
+ from .tokenization_electra import ElectraTokenizer
23
+
24
+
25
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
26
+
27
+
28
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with Bert->Electra , BERT->ELECTRA
29
+ class ElectraTokenizerFast(PreTrainedTokenizerFast):
30
+ r"""
31
+ Construct a "fast" ELECTRA tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
32
+
33
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
34
+ refer to this superclass for more information regarding those methods.
35
+
36
+ Args:
37
+ vocab_file (`str`):
38
+ File containing the vocabulary.
39
+ do_lower_case (`bool`, *optional*, defaults to `True`):
40
+ Whether or not to lowercase the input when tokenizing.
41
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
42
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
43
+ token instead.
44
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
45
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
46
+ sequence classification or for a text and a question for question answering. It is also used as the last
47
+ token of a sequence built with special tokens.
48
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
49
+ The token used for padding, for example when batching sequences of different lengths.
50
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
51
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
52
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
53
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
54
+ The token used for masking values. This is the token used when training this model with masked language
55
+ modeling. This is the token which the model will try to predict.
56
+ clean_text (`bool`, *optional*, defaults to `True`):
57
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
58
+ whitespace characters with spaces.
59
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
60
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
61
+ issue](https://github.com/huggingface/transformers/issues/328)).
62
+ strip_accents (`bool`, *optional*):
63
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
64
+ value for `lowercase` (as in the original ELECTRA).
65
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
66
+ The prefix for subwords.
67
+ """
68
+
69
+ vocab_files_names = VOCAB_FILES_NAMES
70
+ slow_tokenizer_class = ElectraTokenizer
71
+
72
+ def __init__(
73
+ self,
74
+ vocab_file=None,
75
+ tokenizer_file=None,
76
+ do_lower_case=True,
77
+ unk_token="[UNK]",
78
+ sep_token="[SEP]",
79
+ pad_token="[PAD]",
80
+ cls_token="[CLS]",
81
+ mask_token="[MASK]",
82
+ tokenize_chinese_chars=True,
83
+ strip_accents=None,
84
+ **kwargs,
85
+ ):
86
+ super().__init__(
87
+ vocab_file,
88
+ tokenizer_file=tokenizer_file,
89
+ do_lower_case=do_lower_case,
90
+ unk_token=unk_token,
91
+ sep_token=sep_token,
92
+ pad_token=pad_token,
93
+ cls_token=cls_token,
94
+ mask_token=mask_token,
95
+ tokenize_chinese_chars=tokenize_chinese_chars,
96
+ strip_accents=strip_accents,
97
+ **kwargs,
98
+ )
99
+
100
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
101
+ if (
102
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
103
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
104
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
105
+ ):
106
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
107
+ normalizer_state["lowercase"] = do_lower_case
108
+ normalizer_state["strip_accents"] = strip_accents
109
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
110
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
111
+
112
+ self.do_lower_case = do_lower_case
113
+
114
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
115
+ """
116
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
117
+ adding special tokens. An ELECTRA sequence has the following format:
118
+
119
+ - single sequence: `[CLS] X [SEP]`
120
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
121
+
122
+ Args:
123
+ token_ids_0 (`List[int]`):
124
+ List of IDs to which the special tokens will be added.
125
+ token_ids_1 (`List[int]`, *optional*):
126
+ Optional second list of IDs for sequence pairs.
127
+
128
+ Returns:
129
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
130
+ """
131
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
132
+
133
+ if token_ids_1 is not None:
134
+ output += token_ids_1 + [self.sep_token_id]
135
+
136
+ return output
137
+
138
+ def create_token_type_ids_from_sequences(
139
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
140
+ ) -> List[int]:
141
+ """
142
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An ELECTRA sequence
143
+ pair mask has the following format:
144
+
145
+ ```
146
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
147
+ | first sequence | second sequence |
148
+ ```
149
+
150
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
151
+
152
+ Args:
153
+ token_ids_0 (`List[int]`):
154
+ List of IDs.
155
+ token_ids_1 (`List[int]`, *optional*):
156
+ Optional second list of IDs for sequence pairs.
157
+
158
+ Returns:
159
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
160
+ """
161
+ sep = [self.sep_token_id]
162
+ cls = [self.cls_token_id]
163
+ if token_ids_1 is None:
164
+ return len(cls + token_ids_0 + sep) * [0]
165
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
166
+
167
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
168
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
169
+ return tuple(files)
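A minimal usage sketch for `ElectraTokenizerFast`, again assuming the `google/electra-small-discriminator` checkpoint; offset mappings are only available with the fast (Rust-backed) tokenizer:

from transformers import ElectraTokenizerFast

# assumed checkpoint; the fast tokenizer loads the same vocabulary as the slow one above
tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")

encoded = tokenizer("Unaffable weather today", return_offsets_mapping=True)
print(encoded.tokens())           # WordPiece pieces, including the added [CLS] and [SEP]
print(encoded["offset_mapping"])  # character spans, only available with the fast backend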
llmeval-env/lib/python3.10/site-packages/transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py ADDED
@@ -0,0 +1,330 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Meta AI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Feature extractor class for Musicgen Melody
17
+ """
18
+ import copy
19
+ from typing import Any, Dict, List, Optional, Union
20
+
21
+ import numpy as np
22
+
23
+ from ...audio_utils import chroma_filter_bank
24
+ from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
25
+ from ...feature_extraction_utils import BatchFeature
26
+ from ...utils import TensorType, is_torch_available, is_torchaudio_available, logging
27
+
28
+
29
+ if is_torch_available():
30
+ import torch
31
+
32
+ if is_torchaudio_available():
33
+ import torchaudio
34
+
35
+ logger = logging.get_logger(__name__)
36
+
37
+
38
+ class MusicgenMelodyFeatureExtractor(SequenceFeatureExtractor):
39
+ r"""
40
+ Constructs a MusicgenMelody feature extractor.
41
+
42
+ This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
43
+ most of the main methods. Users should refer to this superclass for more information regarding those methods.
44
+
45
+ This class extracts chroma features from audio processed by [Demucs](https://github.com/adefossez/demucs/tree/main) or
46
+ directly from raw audio waveform.
47
+
48
+ Args:
49
+ feature_size (`int`, *optional*, defaults to 12):
50
+ The feature dimension of the extracted features.
51
+ sampling_rate (`int`, *optional*, defaults to 32000):
52
+ The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
53
+ hop_length (`int`, *optional*, defaults to 4096):
54
+ Length of the overlaping windows for the STFT used to obtain the Mel Frequency coefficients.
55
+ chunk_length (`int`, *optional*, defaults to 30):
56
+ The maximum number of chunks of `sampling_rate` samples used to trim and pad longer or shorter audio
57
+ sequences.
58
+ n_fft (`int`, *optional*, defaults to 16384):
59
+ Size of the Fourier transform.
60
+ num_chroma (`int`, *optional*, defaults to 12):
61
+ Number of chroma bins to use.
62
+ padding_value (`float`, *optional*, defaults to 0.0):
63
+ Padding value used to pad the audio.
64
+ return_attention_mask (`bool`, *optional*, defaults to `False`):
65
+ Whether to return the attention mask. Can be overwritten when calling the feature extractor.
66
+
67
+ [What are attention masks?](../glossary#attention-mask)
68
+
69
+ <Tip>
70
+
71
+ For Whisper models, `attention_mask` should always be passed for batched inference, to avoid subtle
72
+ bugs.
73
+
74
+ </Tip>
75
+ stem_indices (`List[int]`, *optional*, defaults to `[3, 2]`):
76
+ Stem channels to extract if demucs outputs are passed.
77
+ """
78
+
79
+ model_input_names = ["input_features"]
80
+
81
+ def __init__(
82
+ self,
83
+ feature_size=12,
84
+ sampling_rate=32000,
85
+ hop_length=4096,
86
+ chunk_length=30,
87
+ n_fft=16384,
88
+ num_chroma=12,
89
+ padding_value=0.0,
90
+ return_attention_mask=False, # pad inputs to max length with silence token (zero) and no attention mask
91
+ stem_indices=[3, 2],
92
+ **kwargs,
93
+ ):
94
+ super().__init__(
95
+ feature_size=feature_size,
96
+ sampling_rate=sampling_rate,
97
+ padding_value=padding_value,
98
+ return_attention_mask=return_attention_mask,
99
+ **kwargs,
100
+ )
101
+ self.n_fft = n_fft
102
+ self.hop_length = hop_length
103
+ self.chunk_length = chunk_length
104
+ self.n_samples = chunk_length * sampling_rate
105
+ self.sampling_rate = sampling_rate
106
+ self.chroma_filters = torch.from_numpy(
107
+ chroma_filter_bank(sampling_rate=sampling_rate, num_frequency_bins=n_fft, tuning=0, num_chroma=num_chroma)
108
+ ).float()
109
+ self.spectrogram = torchaudio.transforms.Spectrogram(
110
+ n_fft=n_fft, win_length=n_fft, hop_length=hop_length, power=2, center=True, pad=0, normalized=True
111
+ )
112
+ self.stem_indices = stem_indices
113
+
114
+ def _torch_extract_fbank_features(self, waveform: torch.Tensor) -> torch.Tensor:
115
+ """
116
+ Compute the chroma spectrogram of the provided audio using the torchaudio spectrogram implementation and the librosa chroma features.
117
+ """
118
+
119
+ # if wav length is not long enough, pad it
120
+ wav_length = waveform.shape[-1]
121
+ if wav_length < self.n_fft:
122
+ pad = self.n_fft - wav_length
123
+ rest = 0 if pad % 2 == 0 else 1
124
+ waveform = torch.nn.functional.pad(waveform, (pad // 2, pad // 2 + rest), "constant", 0)
125
+
126
+ # squeeze alongside channel dimension
127
+ spec = self.spectrogram(waveform).squeeze(1)
128
+
129
+ # sum along the frequency dimension
130
+ raw_chroma = torch.einsum("cf, ...ft->...ct", self.chroma_filters, spec)
131
+
132
+ # normalise with max value
133
+ norm_chroma = torch.nn.functional.normalize(raw_chroma, p=float("inf"), dim=-2, eps=1e-6)
134
+
135
+ # transpose time and chroma dimension -> (batch, time, chroma)
136
+ norm_chroma = norm_chroma.transpose(1, 2)
137
+
138
+ # replace max value alongside chroma dimension with 1 and replace the rest with 0
139
+ idx = norm_chroma.argmax(-1, keepdim=True)
140
+ norm_chroma[:] = 0
141
+ norm_chroma.scatter_(dim=-1, index=idx, value=1)
142
+
143
+ return norm_chroma
144
+
145
+ def _extract_stem_indices(self, audio, sampling_rate=None):
146
+ """
147
+ Extracts stems from the output of the [Demucs](https://github.com/adefossez/demucs/tree/main) audio separation model,
148
+ then converts to mono-channel and resample to the feature extractor sampling rate.
149
+
150
+ Args:
151
+ audio (`torch.Tensor` of shape `(batch_size, num_stems, channel_size, audio_length)`):
152
+ The output of the Demucs model to be processed.
153
+ sampling_rate (`int`, *optional*):
154
+ Demucs sampling rate. If not specified, defaults to `44000`.
155
+ """
156
+ sampling_rate = 44000 if sampling_rate is None else sampling_rate
157
+
158
+ # extract "vocals" and "others" sources from audio encoder (demucs) output
159
+ # [batch_size, num_stems, channel_size, audio_length]
160
+ wav = audio[:, torch.tensor(self.stem_indices)]
161
+
162
+ # merge extracted stems to single waveform
163
+ wav = wav.sum(1)
164
+
165
+ # convert to mono-channel waveform
166
+ wav = wav.mean(dim=1, keepdim=True)
167
+
168
+ # resample to model sampling rate
169
+ # not equivalent to julius.resample
170
+ if sampling_rate != self.sampling_rate:
171
+ wav = torchaudio.functional.resample(
172
+ wav, sampling_rate, self.sampling_rate, rolloff=0.945, lowpass_filter_width=24
173
+ )
174
+
175
+ # [batch_size, 1, audio_length] -> [batch_size, audio_length]
176
+ wav = wav.squeeze(1)
177
+
178
+ return wav
179
+
180
+ def __call__(
181
+ self,
182
+ audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
183
+ truncation: bool = True,
184
+ pad_to_multiple_of: Optional[int] = None,
185
+ return_tensors: Optional[Union[str, TensorType]] = None,
186
+ return_attention_mask: Optional[bool] = None,
187
+ padding: Optional[str] = True,
188
+ max_length: Optional[int] = None,
189
+ sampling_rate: Optional[int] = None,
190
+ **kwargs,
191
+ ) -> BatchFeature:
192
+ """
193
+ Main method to featurize and prepare for the model one or several sequence(s).
194
+
195
+ Args:
196
+ audio (`torch.Tensor`, `np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[torch.Tensor]`, `List[List[float]]`):
197
+ The sequence or batch of sequences to be padded. Each sequence can be a torch tensor, a numpy array, a list of float
198
+ values, a list of numpy arrays, a list of torch tensors, or a list of list of float values.
199
+ If `audio` is the output of Demucs, it has to be a torch tensor of shape `(batch_size, num_stems, channel_size, audio_length)`.
200
+ Otherwise, it must be mono or stereo channel audio.
201
+ truncation (`bool`, *optional*, default to `True`):
202
+ Activates truncation to cut input sequences longer than *max_length* to *max_length*.
203
+ pad_to_multiple_of (`int`, *optional*, defaults to None):
204
+ If set will pad the sequence to a multiple of the provided value.
205
+
206
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
207
+ `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
208
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
209
+ If set, will return tensors instead of list of python integers. Acceptable values are:
210
+
211
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
212
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
213
+ - `'np'`: Return Numpy `np.ndarray` objects.
214
+ return_attention_mask (`bool`, *optional*):
215
+ Whether to return the attention mask. If left to the default, will return the attention mask according
216
+ to the specific feature_extractor's default.
217
+
218
+ [What are attention masks?](../glossary#attention-mask)
219
+
220
+ <Tip>
221
+ For Musicgen Melody models, audio `attention_mask` is not necessary.
222
+ </Tip>
223
+
224
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
225
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
226
+ index) among:
227
+
228
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
229
+ sequence if provided).
230
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
231
+ acceptable input length for the model if that argument is not provided.
232
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
233
+ lengths).
234
+ max_length (`int`, *optional*):
235
+ Maximum length of the returned list and optionally padding length (see above).
236
+ sampling_rate (`int`, *optional*):
237
+ The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass
238
+ `sampling_rate` at the forward call to prevent silent errors.
239
+ Note that if `audio` is the output of Demucs, `sampling_rate` must be the sampling rate at which Demucs operates.
240
+ """
241
+
242
+ if sampling_rate is None:
243
+ logger.warning_once(
244
+ "It is strongly recommended to pass the `sampling_rate` argument to this function. "
245
+ "Failing to do so can result in silent errors that might be hard to debug."
246
+ )
247
+
248
+ if isinstance(audio, torch.Tensor) and len(audio.shape) == 4:
249
+ logger.warning_once(
250
+ "`audio` is a 4-dimensional torch tensor and has thus been recognized as the output of `Demucs`. "
251
+ "If this is not the case, make sure to read Musicgen Melody docstrings and "
252
+ "to correct `audio` to get the right behaviour."
253
+ "Link to the docstrings: https://huggingface.co/docs/transformers/main/en/model_doc/musicgen_melody"
254
+ )
255
+ audio = self._extract_stem_indices(audio, sampling_rate=sampling_rate)
256
+ elif sampling_rate is not None and sampling_rate != self.sampling_rate:
257
+ audio = torchaudio.functional.resample(
258
+ audio, sampling_rate, self.sampling_rate, rolloff=0.945, lowpass_filter_width=24
259
+ )
260
+
261
+ is_batched = isinstance(audio, (np.ndarray, torch.Tensor)) and len(audio.shape) > 1
262
+ is_batched = is_batched or (
263
+ isinstance(audio, (list, tuple)) and (isinstance(audio[0], (torch.Tensor, np.ndarray, tuple, list)))
264
+ )
265
+
266
+ if is_batched and not isinstance(audio[0], torch.Tensor):
267
+ audio = [torch.tensor(speech, dtype=torch.float32).unsqueeze(-1) for speech in audio]
268
+ elif is_batched:
269
+ audio = [speech.unsqueeze(-1) for speech in audio]
270
+ elif not is_batched and not isinstance(audio, torch.Tensor):
271
+ audio = torch.tensor(audio, dtype=torch.float32).unsqueeze(-1)
272
+
273
+ if isinstance(audio[0], torch.Tensor) and audio[0].dtype is torch.float64:
274
+ audio = [speech.to(torch.float32) for speech in audio]
275
+
276
+ # always return batch
277
+ if not is_batched:
278
+ audio = [audio]
279
+
280
+ if len(audio[0].shape) == 3:
281
+ logger.warning_once(
282
+ "`audio` has been detected as a batch of stereo signals. Will be convert to mono signals. "
283
+ "If this is an undesired behaviour, make sure to read Musicgen Melody docstrings and "
284
+ "to correct `audio` to get the right behaviour."
285
+ "Link to the docstrings: https://huggingface.co/docs/transformers/main/en/model_doc/musicgen_melody"
286
+ )
287
+ # convert to mono-channel waveform
288
+ audio = [stereo.mean(dim=0) for stereo in audio]
289
+
290
+ batched_speech = BatchFeature({"input_features": audio})
291
+
292
+ padded_inputs = self.pad(
293
+ batched_speech,
294
+ padding=padding,
295
+ max_length=max_length if max_length else self.n_samples,
296
+ truncation=truncation,
297
+ pad_to_multiple_of=pad_to_multiple_of,
298
+ return_attention_mask=return_attention_mask,
299
+ return_tensors="pt",
300
+ )
301
+
302
+ input_features = self._torch_extract_fbank_features(padded_inputs["input_features"].squeeze(-1))
303
+
304
+ padded_inputs["input_features"] = input_features
305
+
306
+ if return_attention_mask:
307
+ # rescale from raw audio length to spectrogram length
308
+ padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
309
+
310
+ if return_tensors is not None:
311
+ padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
312
+
313
+ return padded_inputs
314
+
315
+ def to_dict(self) -> Dict[str, Any]:
316
+ """
317
+ Serializes this instance to a Python dictionary. Returns:
318
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
319
+ """
320
+ output = copy.deepcopy(self.__dict__)
321
+ output["feature_extractor_type"] = self.__class__.__name__
322
+ if "mel_filters" in output:
323
+ del output["mel_filters"]
324
+ if "window" in output:
325
+ del output["window"]
326
+ if "chroma_filters" in output:
327
+ del output["chroma_filters"]
328
+ if "spectrogram" in output:
329
+ del output["spectrogram"]
330
+ return output
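For readers skimming the diff, here is a minimal usage sketch of a feature extractor exposing the `__call__` defined above. The checkpoint id is an illustrative assumption, not something taken from this diff; the waveform is random noise.

```python
# Hedged usage sketch for the feature extractor defined above.
# The checkpoint id is an illustrative assumption, not taken from this diff.
import numpy as np
from transformers import AutoFeatureExtractor

feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/musicgen-melody")

sr = feature_extractor.sampling_rate
waveform = np.random.randn(sr).astype(np.float32)  # 1 second of mono audio

# Always pass `sampling_rate` so mismatches are caught instead of failing silently.
inputs = feature_extractor(waveform, sampling_rate=sr, padding=True, return_tensors="pt")
print(inputs["input_features"].shape)
```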
llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2_bert/__init__.py ADDED
@@ -0,0 +1,70 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_wav2vec2_bert": [
21
+ "WAV2VEC2_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "Wav2Vec2BertConfig",
23
+ ],
24
+ "processing_wav2vec2_bert": ["Wav2Vec2BertProcessor"],
25
+ }
26
+
27
+
28
+ try:
29
+ if not is_torch_available():
30
+ raise OptionalDependencyNotAvailable()
31
+ except OptionalDependencyNotAvailable:
32
+ pass
33
+ else:
34
+ _import_structure["modeling_wav2vec2_bert"] = [
35
+ "WAV2VEC2_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
36
+ "Wav2Vec2BertForAudioFrameClassification",
37
+ "Wav2Vec2BertForCTC",
38
+ "Wav2Vec2BertForSequenceClassification",
39
+ "Wav2Vec2BertForXVector",
40
+ "Wav2Vec2BertModel",
41
+ "Wav2Vec2BertPreTrainedModel",
42
+ ]
43
+
44
+ if TYPE_CHECKING:
45
+ from .configuration_wav2vec2_bert import (
46
+ WAV2VEC2_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
47
+ Wav2Vec2BertConfig,
48
+ )
49
+ from .processing_wav2vec2_bert import Wav2Vec2BertProcessor
50
+
51
+ try:
52
+ if not is_torch_available():
53
+ raise OptionalDependencyNotAvailable()
54
+ except OptionalDependencyNotAvailable:
55
+ pass
56
+ else:
57
+ from .modeling_wav2vec2_bert import (
58
+ WAV2VEC2_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
59
+ Wav2Vec2BertForAudioFrameClassification,
60
+ Wav2Vec2BertForCTC,
61
+ Wav2Vec2BertForSequenceClassification,
62
+ Wav2Vec2BertForXVector,
63
+ Wav2Vec2BertModel,
64
+ Wav2Vec2BertPreTrainedModel,
65
+ )
66
+
67
+ else:
68
+ import sys
69
+
70
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
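As a sketch of what this lazy-module wiring gives the caller (not part of the diff itself): attributes are resolved on first access, so the torch-free configuration and processor can be imported without pulling in the modeling code.

```python
# Sketch: lazy imports resolved on first attribute access.
from transformers.models.wav2vec2_bert import Wav2Vec2BertConfig, Wav2Vec2BertProcessor

config = Wav2Vec2BertConfig()   # no torch needed for the configuration
print(config.model_type)        # "wav2vec2-bert"

# Model classes live behind the torch-gated branch of `_import_structure`.
from transformers.models.wav2vec2_bert import Wav2Vec2BertModel  # requires torch
```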
llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2_bert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.19 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2_bert/__pycache__/configuration_wav2vec2_bert.cpython-310.pyc ADDED
Binary file (15.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2_bert/__pycache__/convert_wav2vec2_seamless_checkpoint.cpython-310.pyc ADDED
Binary file (5.82 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2_bert/__pycache__/modeling_wav2vec2_bert.cpython-310.pyc ADDED
Binary file (43.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2_bert/__pycache__/processing_wav2vec2_bert.cpython-310.pyc ADDED
Binary file (6.62 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py ADDED
@@ -0,0 +1,314 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Wav2Vec2Bert model configuration"""
16
+
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ from ..deprecated._archive_maps import WAV2VEC2_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
26
+
27
+
28
+ class Wav2Vec2BertConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`Wav2Vec2BertModel`]. It is used to
31
+ instantiate an Wav2Vec2Bert model according to the specified arguments, defining the model architecture.
32
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the Wav2Vec2Bert
33
+ [facebook/wav2vec2-bert-rel-pos-large](https://huggingface.co/facebook/wav2vec2-bert-rel-pos-large)
34
+ architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*):
42
+ Vocabulary size of the Wav2Vec2Bert model. Defines the number of different tokens that can be
43
+ represented by the `inputs_ids` passed when calling [`Wav2Vec2BertModel`]. Vocabulary size of the
44
+ model. Defines the different tokens that can be represented by the *inputs_ids* passed to the forward
45
+ method of [`Wav2Vec2BertModel`].
46
+ hidden_size (`int`, *optional*, defaults to 1024):
47
+ Dimensionality of the encoder layers and the pooler layer.
48
+ num_hidden_layers (`int`, *optional*, defaults to 24):
49
+ Number of hidden layers in the Transformer encoder.
50
+ num_attention_heads (`int`, *optional*, defaults to 16):
51
+ Number of attention heads for each attention layer in the Transformer encoder.
52
+ intermediate_size (`int`, *optional*, defaults to 4096):
53
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
54
+ feature_projection_input_dim (`int`, *optional*, defaults to 160):
55
+ Input dimension of this model, i.e. the dimension after processing input audio with [`SeamlessM4TFeatureExtractor`] or [`Wav2Vec2BertProcessor`].
56
+ hidden_act (`str` or `function`, *optional*, defaults to `"swish"`):
57
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
58
+ `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
59
+ hidden_dropout (`float`, *optional*, defaults to 0.0):
60
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
61
+ activation_dropout (`float`, *optional*, defaults to 0.0):
62
+ The dropout ratio for activations inside the fully connected layer.
63
+ attention_dropout (`float`, *optional*, defaults to 0.0):
64
+ The dropout ratio for the attention probabilities.
65
+ feat_proj_dropout (`float`, *optional*, defaults to 0.0):
66
+ The dropout probability for the feature projection.
67
+ final_dropout (`float`, *optional*, defaults to 0.1):
68
+ The dropout probability for the final projection layer of [`Wav2Vec2BertForCTC`].
69
+ layerdrop (`float`, *optional*, defaults to 0.1):
70
+ The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more
71
+ details.
72
+ initializer_range (`float`, *optional*, defaults to 0.02):
73
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
74
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
75
+ The epsilon used by the layer normalization layers.
76
+ apply_spec_augment (`bool`, *optional*, defaults to `True`):
77
+ Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
78
+ [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
79
+ Recognition](https://arxiv.org/abs/1904.08779).
80
+ mask_time_prob (`float`, *optional*, defaults to 0.05):
81
+ Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
82
+ procedure generates `mask_time_prob*len(time_axis)/mask_time_length` independent masks over the axis. If
83
+ reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
84
+ masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
85
+ actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
86
+ mask_time_length (`int`, *optional*, defaults to 10):
87
+ Length of vector span along the time axis.
88
+ mask_time_min_masks (`int`, *optional*, defaults to 2):
89
+ The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step,
90
+ irrespectively of `mask_feature_prob`. Only relevant if `mask_time_prob*len(time_axis)/mask_time_length <
91
+ mask_time_min_masks`.
92
+ mask_feature_prob (`float`, *optional*, defaults to 0.0):
93
+ Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
94
+ masking procedure generates `mask_feature_prob*len(feature_axis)/mask_feature_length` independent masks over
95
+ the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector
96
+ span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap
97
+ may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is
98
+ True`.
99
+ mask_feature_length (`int`, *optional*, defaults to 10):
100
+ Length of vector span along the feature axis.
101
+ mask_feature_min_masks (`int`, *optional*, defaults to 0):
102
+ The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
103
+ step, irrespectively of `mask_feature_prob`. Only relevant if
104
+ `mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks`.
105
+ ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
106
+ Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
107
+ instance of [`Wav2Vec2BertForCTC`].
108
+ ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
109
+ Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
110
+ occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
111
+ of [`Wav2Vec2BertForCTC`].
112
+ use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
113
+ Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
114
+ instance of [`Wav2Vec2BertForSequenceClassification`].
115
+ classifier_proj_size (`int`, *optional*, defaults to 768):
116
+ Dimensionality of the projection before token mean-pooling for classification.
117
+ tdnn_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`):
118
+ A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN*
119
+ module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers.
120
+ tdnn_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`):
121
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the
122
+ *XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*.
123
+ tdnn_dilation (`Tuple[int]` or `List[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`):
124
+ A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the
125
+ *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*.
126
+ xvector_output_dim (`int`, *optional*, defaults to 512):
127
+ Dimensionality of the *XVector* embedding vectors.
128
+ pad_token_id (`int`, *optional*, defaults to 0): The id of the _padding_ token.
129
+ bos_token_id (`int`, *optional*, defaults to 1): The id of the _beginning-of-stream_ token.
130
+ eos_token_id (`int`, *optional*, defaults to 2): The id of the _end-of-stream_ token.
131
+ add_adapter (`bool`, *optional*, defaults to `False`):
132
+ Whether a convolutional attention network should be stacked on top of the Wav2Vec2Bert Encoder. Can be very
133
+ useful for warm-starting Wav2Vec2Bert for SpeechEncoderDecoder models.
134
+ adapter_kernel_size (`int`, *optional*, defaults to 3):
135
+ Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
136
+ adapter_stride (`int`, *optional*, defaults to 2):
137
+ Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
138
+ num_adapter_layers (`int`, *optional*, defaults to 1):
139
+ Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
140
+ True`.
141
+ adapter_act (`str` or `function`, *optional*, defaults to `"relu"`):
142
+ The non-linear activation function (function or string) in the adapter layers. If string, `"gelu"`,
143
+ `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
144
+ use_intermediate_ffn_before_adapter (`bool`, *optional*, defaults to `False`):
145
+ Whether an intermediate feed-forward block should be stacked on top of the Wav2Vec2Bert Encoder and before the adapter network.
146
+ Only relevant if `add_adapter is True`.
147
+ output_hidden_size (`int`, *optional*):
148
+ Dimensionality of the encoder output layer. If not defined, this defaults to *hidden_size*. Only relevant
149
+ if `add_adapter is True`.
150
+ position_embeddings_type (`str`, *optional*, defaults to `"relative_key"`):
151
+ Can be set to:
152
+ - `rotary`, for rotary position embeddings.
153
+ - `relative`, for relative position embeddings.
154
+ - `relative_key`, for relative position embeddings as defined by Shaw in [Self-Attention
155
+ with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
156
+ If left to `None`, no relative position embeddings are applied.
157
+ rotary_embedding_base (`int`, *optional*, defaults to 10000):
158
+ If `"rotary"` position embeddings are used, defines the size of the embedding base.
159
+ max_source_positions (`int`, *optional*, defaults to 5000):
160
+ if `"relative"` position embeddings are used, defines the maximum source input positions.
161
+ left_max_position_embeddings (`int`, *optional*, defaults to 64):
162
+ If `"relative_key"` (aka Shaw) position embeddings are used, defines the left clipping value for relative positions.
163
+ right_max_position_embeddings (`int`, *optional*, defaults to 8):
164
+ If `"relative_key"` (aka Shaw) position embeddings are used, defines the right clipping value for relative positions.
165
+ conv_depthwise_kernel_size (`int`, *optional*, defaults to 31):
166
+ Kernel size of convolutional depthwise 1D layer in Conformer blocks.
167
+ conformer_conv_dropout (`float`, *optional*, defaults to 0.1):
168
+ The dropout probability for all convolutional layers in Conformer blocks.
169
+ Example:
170
+
171
+ ```python
172
+ >>> from transformers import Wav2Vec2BertConfig, Wav2Vec2BertModel
173
+
174
+ >>> # Initializing a Wav2Vec2Bert facebook/wav2vec2-bert-rel-pos-large style configuration
175
+ >>> configuration = Wav2Vec2BertConfig()
176
+
177
+ >>> # Initializing a model (with random weights) from the facebook/wav2vec2-bert-rel-pos-large style configuration
178
+ >>> model = Wav2Vec2BertModel(configuration)
179
+
180
+ >>> # Accessing the model configuration
181
+ >>> configuration = model.config
182
+ ```"""
183
+
184
+ model_type = "wav2vec2-bert"
185
+
186
+ def __init__(
187
+ self,
188
+ vocab_size=None,
189
+ hidden_size=1024,
190
+ num_hidden_layers=24,
191
+ num_attention_heads=16,
192
+ intermediate_size=4096,
193
+ feature_projection_input_dim=160,
194
+ hidden_act="swish",
195
+ hidden_dropout=0.0,
196
+ activation_dropout=0.0,
197
+ attention_dropout=0.0,
198
+ feat_proj_dropout=0.0,
199
+ final_dropout=0.1,
200
+ layerdrop=0.1,
201
+ initializer_range=0.02,
202
+ layer_norm_eps=1e-5,
203
+ apply_spec_augment=True,
204
+ mask_time_prob=0.05,
205
+ mask_time_length=10,
206
+ mask_time_min_masks=2,
207
+ mask_feature_prob=0.0,
208
+ mask_feature_length=10,
209
+ mask_feature_min_masks=0,
210
+ ctc_loss_reduction="sum",
211
+ ctc_zero_infinity=False,
212
+ use_weighted_layer_sum=False,
213
+ classifier_proj_size=768,
214
+ tdnn_dim=(512, 512, 512, 512, 1500),
215
+ tdnn_kernel=(5, 3, 3, 1, 1),
216
+ tdnn_dilation=(1, 2, 3, 1, 1),
217
+ xvector_output_dim=512,
218
+ pad_token_id=0,
219
+ bos_token_id=1,
220
+ eos_token_id=2,
221
+ add_adapter=False,
222
+ adapter_kernel_size=3,
223
+ adapter_stride=2,
224
+ num_adapter_layers=1,
225
+ adapter_act="relu",
226
+ use_intermediate_ffn_before_adapter=False,
227
+ output_hidden_size=None,
228
+ position_embeddings_type="relative_key",
229
+ rotary_embedding_base=10000,
230
+ max_source_positions=5000,
231
+ left_max_position_embeddings=64,
232
+ right_max_position_embeddings=8,
233
+ conv_depthwise_kernel_size=31,
234
+ conformer_conv_dropout=0.1,
235
+ **kwargs,
236
+ ):
237
+ super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
238
+ self.hidden_size = hidden_size
239
+ self.num_hidden_layers = num_hidden_layers
240
+ self.intermediate_size = intermediate_size
241
+ self.hidden_act = hidden_act
242
+ self.num_attention_heads = num_attention_heads
243
+ self.feature_projection_input_dim = feature_projection_input_dim
244
+ self.hidden_dropout = hidden_dropout
245
+ self.attention_dropout = attention_dropout
246
+ self.activation_dropout = activation_dropout
247
+ self.feat_proj_dropout = feat_proj_dropout
248
+ self.final_dropout = final_dropout
249
+ self.layerdrop = layerdrop
250
+ self.layer_norm_eps = layer_norm_eps
251
+ self.initializer_range = initializer_range
252
+ self.vocab_size = vocab_size
253
+ self.use_weighted_layer_sum = use_weighted_layer_sum
254
+ self.max_source_positions = max_source_positions
255
+
256
+ if position_embeddings_type is not None and position_embeddings_type not in [
257
+ "rotary",
258
+ "relative",
259
+ "relative_key",
260
+ ]:
261
+ raise ValueError(
262
+ """
263
+ `position_embeddings_type` is not valid. It must be one of the following values:
264
+ `["rotary", "relative", "relative_key"]` or left as `None`.
265
+ """
266
+ )
267
+ self.position_embeddings_type = position_embeddings_type
268
+ self.rotary_embedding_base = rotary_embedding_base
269
+ self.left_max_position_embeddings = left_max_position_embeddings
270
+ self.right_max_position_embeddings = right_max_position_embeddings
271
+
272
+ # Conformer-block related
273
+ self.conv_depthwise_kernel_size = conv_depthwise_kernel_size
274
+ self.conformer_conv_dropout = conformer_conv_dropout
275
+
276
+ # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
277
+ self.apply_spec_augment = apply_spec_augment
278
+ self.mask_time_prob = mask_time_prob
279
+ self.mask_time_length = mask_time_length
280
+ self.mask_time_min_masks = mask_time_min_masks
281
+ self.mask_feature_prob = mask_feature_prob
282
+ self.mask_feature_length = mask_feature_length
283
+ self.mask_feature_min_masks = mask_feature_min_masks
284
+
285
+ # ctc loss
286
+ self.ctc_loss_reduction = ctc_loss_reduction
287
+ self.ctc_zero_infinity = ctc_zero_infinity
288
+
289
+ # adapter
290
+ self.add_adapter = add_adapter
291
+ self.adapter_kernel_size = adapter_kernel_size
292
+ self.adapter_stride = adapter_stride
293
+ self.num_adapter_layers = num_adapter_layers
294
+ self.adapter_act = adapter_act
295
+ self.output_hidden_size = output_hidden_size if output_hidden_size is not None else hidden_size
296
+ if use_intermediate_ffn_before_adapter and not add_adapter:
297
+ raise ValueError("`use_intermediate_ffn_before_adapter` is `True` but `add_adapter` is `False`.")
298
+ self.use_intermediate_ffn_before_adapter = use_intermediate_ffn_before_adapter
299
+
300
+ # SequenceClassification-specific parameter. Feel free to ignore for other classes.
301
+ self.classifier_proj_size = classifier_proj_size
302
+
303
+ # XVector-specific parameters. Feel free to ignore for other classes.
304
+ self.tdnn_dim = list(tdnn_dim)
305
+ self.tdnn_kernel = list(tdnn_kernel)
306
+ self.tdnn_dilation = list(tdnn_dilation)
307
+ self.xvector_output_dim = xvector_output_dim
308
+
309
+ @property
310
+ def inputs_to_logits_ratio(self):
311
+ ratio = self.feature_projection_input_dim * 2
312
+ if self.add_adapter:
313
+ ratio = ratio * (self.adapter_stride**self.num_adapter_layers)
314
+ return ratio
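A small illustrative check of the `inputs_to_logits_ratio` property defined above, using the default values from `__init__` (the numbers follow directly from `feature_projection_input_dim=160`, `adapter_stride=2`, `num_adapter_layers=1`):

```python
from transformers import Wav2Vec2BertConfig

# Defaults: 160 input features, stacked in pairs -> ratio of 320
config = Wav2Vec2BertConfig()
assert config.inputs_to_logits_ratio == 160 * 2

# With the adapter enabled, the ratio is multiplied by adapter_stride ** num_adapter_layers
config = Wav2Vec2BertConfig(add_adapter=True)
assert config.inputs_to_logits_ratio == 160 * 2 * 2
```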
llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2_bert/convert_wav2vec2_seamless_checkpoint.py ADDED
@@ -0,0 +1,218 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Wav2Vec2Bert BERT checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ import torch
21
+ import torchaudio
22
+ from fairseq2.data import Collater
23
+ from fairseq2.data.audio import WaveformToFbankConverter
24
+ from fairseq2.nn.padding import get_seqs_and_padding_mask
25
+ from seamless_communication.models.conformer_shaw import load_conformer_shaw_model
26
+
27
+ from transformers import (
28
+ SeamlessM4TFeatureExtractor,
29
+ Wav2Vec2BertConfig,
30
+ Wav2Vec2BertModel,
31
+ logging,
32
+ )
33
+
34
+
35
+ logging.set_verbosity_info()
36
+ logger = logging.get_logger(__name__)
37
+
38
+
39
+ wav2vec_convert_list = [
40
+ ("encoder_frontend.model_dim_proj", "feature_projection.projection"),
41
+ ("encoder_frontend.post_extract_layer_norm", "feature_projection.layer_norm"),
42
+ ("encoder_frontend.pos_encoder.conv", "encoder.pos_conv_embed.conv"),
43
+ ("encoder.inner.layers", "encoder.layers"),
44
+ ("encoder.inner_layer_norm", "encoder.layer_norm"),
45
+ ("encoder.adaptor_layers", "adapter.layers"),
46
+ ("inner_proj", "intermediate_dense"),
47
+ ("self_attn.output_proj", "self_attn.linear_out"),
48
+ ("output_proj", "output_dense"),
49
+ ("self_attn.k_proj", "self_attn.linear_k"),
50
+ ("self_attn.v_proj", "self_attn.linear_v"),
51
+ ("self_attn.q_proj", "self_attn.linear_q"),
52
+ ("self_attn.sdpa.u_bias", "self_attn.pos_bias_u"),
53
+ ("self_attn.sdpa.v_bias", "self_attn.pos_bias_v"),
54
+ ("self_attn.sdpa.rel_k_embed", "self_attn.distance_embedding"),
55
+ ("self_attn.sdpa.r_proj", "self_attn.linear_pos"),
56
+ ("conv.pointwise_conv1", "conv_module.pointwise_conv1"),
57
+ ("conv.pointwise_conv2", "conv_module.pointwise_conv2"),
58
+ ("conv.depthwise_conv", "conv_module.depthwise_conv"),
59
+ ("conv.layer_norm", "conv_module.depthwise_layer_norm"),
60
+ ("conv_layer_norm", "conv_module.layer_norm"),
61
+ ("encoder.proj1", "intermediate_ffn.intermediate_dense"),
62
+ ("encoder.proj2", "intermediate_ffn.output_dense"),
63
+ ("encoder.layer_norm", "inner_layer_norm"),
64
+ ("masker.temporal_mask_embed", "masked_spec_embed"),
65
+ ]
66
+
67
+ keys_to_remove = {
68
+ "quantizer.entry_proj",
69
+ "final_proj",
70
+ "final_target_proj",
71
+ "quantizer.entries",
72
+ "quantizer.num_updates",
73
+ }
74
+
75
+
76
+ def param_count(model):
77
+ return sum(p[1].numel() for p in model.named_parameters() if "final_proj" not in p[0])
78
+
79
+
80
+ def _convert_model(
81
+ original_model,
82
+ hf_model,
83
+ convert_list,
84
+ ):
85
+ state_dict = original_model.state_dict()
86
+
87
+ for k, v in list(state_dict.items()):
88
+ new_key = k
89
+ for old_layer_name, new_layer_name in convert_list:
90
+ if old_layer_name in new_key:
91
+ new_key = new_key.replace(old_layer_name, new_layer_name)
92
+
93
+ # must do it by hand
94
+ if ".layer_norm" in new_key and new_key.split(".layer_norm")[0][-1].isnumeric():
95
+ new_key = new_key.replace("layer_norm", "final_layer_norm")
96
+
97
+ add_key = True
98
+ for key in keys_to_remove:
99
+ if key in new_key:
100
+ state_dict.pop(k)
101
+ add_key = False
102
+ break
103
+
104
+ if add_key:
105
+ state_dict[new_key] = state_dict.pop(k)
106
+
107
+ extra_keys = set(state_dict.keys()) - set(hf_model.state_dict().keys())
108
+ extra_keys = set({k for k in extra_keys if "num_updates" not in k}) # filter unnecessary params
109
+ missing_keys = set(hf_model.state_dict().keys()) - set(state_dict.keys())
110
+ if len(extra_keys) != 0:
111
+ raise ValueError(f"extra keys found: {extra_keys}")
112
+ if len(missing_keys) != 0:
113
+ raise ValueError(f"missing keys: {missing_keys}")
114
+ hf_model.load_state_dict(state_dict, strict=True)
115
+ n_params = param_count(hf_model)
116
+
117
+ logger.info(f"model loaded: {round(n_params/1e6,1)}M params")
118
+
119
+ hf_model.eval()
120
+ del state_dict
121
+
122
+ return hf_model
123
+
124
+
125
+ @torch.no_grad()
126
+ def convert_wav2vec2_bert_checkpoint(
127
+ checkpoint_path,
128
+ pytorch_dump_folder_path,
129
+ config_path=None,
130
+ repo_id=None,
131
+ ):
132
+ """
133
+ Copy/paste/tweak model's weights to transformers design.
134
+ """
135
+ if config_path is not None:
136
+ config = Wav2Vec2BertConfig.from_pretrained(config_path, hidden_act="swish")
137
+ else:
138
+ config = Wav2Vec2BertConfig(apply_spec_augment=False)
139
+
140
+ hf_wav2vec = Wav2Vec2BertModel(config)
141
+
142
+ model = load_conformer_shaw_model(checkpoint_path, dtype=torch.float32)
143
+ model.eval()
144
+
145
+ hf_wav2vec = _convert_model(model, hf_wav2vec, wav2vec_convert_list)
146
+
147
+ hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
148
+
149
+ if repo_id:
150
+ hf_wav2vec.push_to_hub(repo_id, create_pr=True)
151
+
152
+ # save feature extractor
153
+ fe = SeamlessM4TFeatureExtractor(padding_value=1)
154
+ fe._set_processor_class("Wav2Vec2BertProcessor")
155
+ fe.save_pretrained(pytorch_dump_folder_path)
156
+
157
+ if repo_id:
158
+ fe.push_to_hub(repo_id, create_pr=True)
159
+
160
+ if args.audio_path:
161
+ waveform, sample_rate = torchaudio.load(args.audio_path)
162
+ waveform = torchaudio.functional.resample(waveform, sample_rate, fe.sampling_rate)
163
+
164
+ fbank_converter = WaveformToFbankConverter(
165
+ num_mel_bins=80,
166
+ waveform_scale=2**15,
167
+ channel_last=True,
168
+ standardize=True,
169
+ dtype=torch.float32,
170
+ )
171
+ collater = Collater(pad_value=1)
172
+
173
+ decoded_audio = {"waveform": waveform.T, "sample_rate": fe.sampling_rate, "format": -1}
174
+ src = collater(fbank_converter(decoded_audio))["fbank"]
175
+ seqs, padding_mask = get_seqs_and_padding_mask(src)
176
+
177
+ with torch.inference_mode():
178
+ seqs, padding_mask = model.encoder_frontend(seqs, padding_mask)
179
+ original_output, padding_mask = model.encoder(seqs, padding_mask)
180
+
181
+ hf_wav2vec.eval()
182
+
183
+ inputs = fe(waveform, return_tensors="pt", padding=True)
184
+ with torch.no_grad():
185
+ outputs = hf_wav2vec(**inputs)
186
+
187
+ torch.testing.assert_close(original_output, outputs.last_hidden_state, atol=5e-3, rtol=5e-3)
188
+
189
+
190
+ if __name__ == "__main__":
191
+ parser = argparse.ArgumentParser()
192
+ parser.add_argument(
193
+ "--pytorch_dump_folder_path",
194
+ default=None,
195
+ type=str,
196
+ help="Path to the output PyTorch model.",
197
+ )
198
+ parser.add_argument(
199
+ "--checkpoint_path", default="conformer_shaw", type=str, help="Path to seamless communication checkpoint"
200
+ )
201
+ parser.add_argument(
202
+ "--config_path",
203
+ default=None,
204
+ type=str,
205
+ help="Path to hf config.json of model to convert",
206
+ )
207
+ parser.add_argument("--repo_id", default=None, type=str, help="Push to this repo id if precised.")
208
+ parser.add_argument(
209
+ "--audio_path",
210
+ default=None,
211
+ type=str,
212
+ help="If specified, check that the original model and the converted model produce the same outputs.",
213
+ )
214
+
215
+ args = parser.parse_args()
216
+ convert_wav2vec2_bert_checkpoint(
217
+ args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.repo_id
218
+ )
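Note that the optional verification branch above reads the global `args.audio_path` set by argparse, so the converter is meant to be driven via its CLI. A hedged sketch of invoking it (paths are placeholders):

```python
# Sketch: driving the conversion script via its CLI flags (paths are placeholders).
import subprocess

subprocess.run(
    [
        "python", "convert_wav2vec2_seamless_checkpoint.py",
        "--checkpoint_path", "conformer_shaw",           # seamless_communication checkpoint
        "--pytorch_dump_folder_path", "./w2v-bert-2.0",  # where the HF weights are written
        # "--repo_id", "<hub-repo>",                     # optionally push to the Hub
        # "--audio_path", "sample.wav",                  # optionally compare model outputs
    ],
    check=True,
)
```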
llmeval-env/lib/python3.10/site-packages/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py ADDED
@@ -0,0 +1,1671 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Seamless Authors and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Wav2Vec2-BERT model."""
16
+
17
+ import math
18
+ import warnings
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import CrossEntropyLoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...integrations.deepspeed import is_deepspeed_zero3_enabled
29
+ from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
30
+ from ...modeling_outputs import (
31
+ BaseModelOutput,
32
+ CausalLMOutput,
33
+ SequenceClassifierOutput,
34
+ TokenClassifierOutput,
35
+ Wav2Vec2BaseModelOutput,
36
+ XVectorOutput,
37
+ )
38
+ from ...modeling_utils import PreTrainedModel
39
+ from ...utils import (
40
+ add_code_sample_docstrings,
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ is_peft_available,
44
+ logging,
45
+ )
46
+ from .configuration_wav2vec2_bert import Wav2Vec2BertConfig
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+
52
+ _HIDDEN_STATES_START_POSITION = 2
53
+
54
+ # General docstring
55
+ _CONFIG_FOR_DOC = "Wav2Vec2BertConfig"
56
+
57
+ # Base docstring
58
+ _BASE_CHECKPOINT_FOR_DOC = "facebook/w2v-bert-2.0"
59
+ _PRETRAINED_CHECKPOINT_FOR_DOC = "hf-audio/wav2vec2-bert-CV16-en"
60
+ _EXPECTED_OUTPUT_SHAPE = [1, 146, 1024]
61
+
62
+ # CTC docstring
63
+ _CTC_EXPECTED_OUTPUT = "'mr quilter is the apostle of the middle classes and we are glad to welcome his gospel'"
64
+ _CTC_EXPECTED_LOSS = 17.04
65
+
66
+
67
+ from ..deprecated._archive_maps import WAV2VEC2_BERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
68
+
69
+
70
+ # Copied from transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2._compute_new_attention_mask
71
+ def _compute_new_attention_mask(hidden_states: torch.Tensor, seq_lens: torch.Tensor):
72
+ """
73
+ Computes an attention mask of the form `(batch, seq_len)` with an attention for each element in the batch that
74
+ stops at the corresponding element in `seq_lens`.
75
+ Args:
76
+ hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, *)`):
77
+ The sequences to mask, where `*` is any number of sequence-specific dimensions including none.
78
+ seq_lens (`torch.Tensor` of shape `(batch)`:
79
+ Each element represents the length of the sequence at the same index in `hidden_states`
80
+ Returns:
81
+ `torch.FloatTensor`: The float attention mask of shape `(batch, seq_len)`
82
+ """
83
+ batch_size, mask_seq_len = hidden_states.shape[:2]
84
+
85
+ indices = torch.arange(mask_seq_len, device=seq_lens.device).expand(batch_size, -1)
86
+
87
+ bool_mask = indices >= seq_lens.unsqueeze(1).expand(-1, mask_seq_len)
88
+
89
+ mask = hidden_states.new_ones((batch_size, mask_seq_len))
90
+
91
+ mask = mask.masked_fill(bool_mask, 0)
92
+
93
+ return mask
94
+
95
+
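A tiny illustration of `_compute_new_attention_mask` (the values are made up; the function is importable from this module):

```python
import torch
from transformers.models.wav2vec2_bert.modeling_wav2vec2_bert import _compute_new_attention_mask

hidden_states = torch.zeros(2, 5, 8)      # (batch, seq_len, hidden)
seq_lens = torch.tensor([3, 5])
mask = _compute_new_attention_mask(hidden_states, seq_lens)
# tensor([[1., 1., 1., 0., 0.],
#         [1., 1., 1., 1., 1.]])
```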
96
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
97
+ def _compute_mask_indices(
98
+ shape: Tuple[int, int],
99
+ mask_prob: float,
100
+ mask_length: int,
101
+ attention_mask: Optional[torch.LongTensor] = None,
102
+ min_masks: int = 0,
103
+ ) -> np.ndarray:
104
+ """
105
+ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
106
+ ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
107
+ CPU as part of the preprocessing during training.
108
+
109
+ Args:
110
+ shape: The shape for which to compute masks. This should be of a tuple of size 2 where
111
+ the first element is the batch size and the second element is the length of the axis to span.
112
+ mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
113
+ independently generated mask spans of length `mask_length` is computed by
114
+ `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
115
+ actual percentage will be smaller.
116
+ mask_length: size of the mask
117
+ min_masks: minimum number of masked spans
118
+ attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
119
+ each batch dimension.
120
+ """
121
+ batch_size, sequence_length = shape
122
+
123
+ if mask_length < 1:
124
+ raise ValueError("`mask_length` has to be bigger than 0.")
125
+
126
+ if mask_length > sequence_length:
127
+ raise ValueError(
128
+ f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
129
+ f" and `sequence_length`: {sequence_length}`"
130
+ )
131
+
132
+ # epsilon is used for probabilistic rounding
133
+ epsilon = np.random.rand(1).item()
134
+
135
+ def compute_num_masked_span(input_length):
136
+ """Given input length, compute how many spans should be masked"""
137
+ num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
138
+ num_masked_span = max(num_masked_span, min_masks)
139
+
140
+ # make sure num masked span <= sequence_length
141
+ if num_masked_span * mask_length > sequence_length:
142
+ num_masked_span = sequence_length // mask_length
143
+
144
+ # make sure num_masked span is also <= input_length - (mask_length - 1)
145
+ if input_length - (mask_length - 1) < num_masked_span:
146
+ num_masked_span = max(input_length - (mask_length - 1), 0)
147
+
148
+ return num_masked_span
149
+
150
+ # compute number of masked spans in batch
151
+ input_lengths = (
152
+ attention_mask.sum(-1).detach().tolist()
153
+ if attention_mask is not None
154
+ else [sequence_length for _ in range(batch_size)]
155
+ )
156
+
157
+ # SpecAugment mask to fill
158
+ spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
159
+ spec_aug_mask_idxs = []
160
+
161
+ max_num_masked_span = compute_num_masked_span(sequence_length)
162
+
163
+ if max_num_masked_span == 0:
164
+ return spec_aug_mask
165
+
166
+ for input_length in input_lengths:
167
+ # compute num of masked spans for this input
168
+ num_masked_span = compute_num_masked_span(input_length)
169
+
170
+ # get random indices to mask
171
+ spec_aug_mask_idx = np.random.choice(
172
+ np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
173
+ )
174
+
175
+ # pick first sampled index that will serve as a dummy index to pad vector
176
+ # to ensure same dimension for all batches due to probabilistic rounding
177
+ # Picking first sample just pads those vectors twice.
178
+ if len(spec_aug_mask_idx) == 0:
179
+ # this case can only happen if `input_length` is strictly smaller than
180
+ # `sequence_length` in which case the last token has to be a padding
181
+ # token which we can use as a dummy mask id
182
+ dummy_mask_idx = sequence_length - 1
183
+ else:
184
+ dummy_mask_idx = spec_aug_mask_idx[0]
185
+
186
+ spec_aug_mask_idx = np.concatenate(
187
+ [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
188
+ )
189
+ spec_aug_mask_idxs.append(spec_aug_mask_idx)
190
+
191
+ spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
192
+
193
+ # expand masked indices to masked spans
194
+ spec_aug_mask_idxs = np.broadcast_to(
195
+ spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
196
+ )
197
+ spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
198
+
199
+ # add offset to the starting indexes so that indexes now create a span
200
+ offsets = np.arange(mask_length)[None, None, :]
201
+ offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
202
+ batch_size, max_num_masked_span * mask_length
203
+ )
204
+ spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
205
+
206
+ # ensure that we cannot have indices larger than sequence_length
207
+ if spec_aug_mask_idxs.max() > sequence_length - 1:
208
+ spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
209
+
210
+ # scatter indices to mask
211
+ np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
212
+
213
+ return spec_aug_mask
214
+
215
+
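An illustrative call to `_compute_mask_indices` with made-up shapes (mirroring the SpecAugment defaults in the config above: 5% time masking, spans of length 10, at least 2 spans):

```python
import numpy as np
from transformers.models.wav2vec2_bert.modeling_wav2vec2_bert import _compute_mask_indices

np.random.seed(0)
mask = _compute_mask_indices(shape=(2, 100), mask_prob=0.05, mask_length=10, min_masks=2)
print(mask.shape)         # (2, 100), boolean
print(mask.sum(axis=-1))  # up to 20 masked time steps per row (overlaps may reduce this)
```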
216
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2._sample_negative_indices
217
+ def _sample_negative_indices(
218
+ features_shape: Tuple, num_negatives: int, mask_time_indices: Optional[np.ndarray] = None
219
+ ):
220
+ """
221
+ Sample `num_negatives` vectors from feature vectors.
222
+ """
223
+ batch_size, sequence_length = features_shape
224
+
225
+ # generate indices of the positive vectors themselves, repeat them `num_negatives` times
226
+ sequence_length_range = np.arange(sequence_length)
227
+
228
+ # get `num_negatives` random vector indices from the same utterance
229
+ sampled_negative_indices = np.zeros(shape=(batch_size, sequence_length, num_negatives), dtype=np.int32)
230
+
231
+ mask_time_indices = (
232
+ mask_time_indices.astype(bool) if mask_time_indices is not None else np.ones(features_shape, dtype=bool)
233
+ )
234
+
235
+ for batch_idx in range(batch_size):
236
+ high = mask_time_indices[batch_idx].sum() - 1
237
+ mapped_masked_indices = sequence_length_range[mask_time_indices[batch_idx]]
238
+
239
+ feature_indices = np.broadcast_to(np.arange(high + 1)[:, None], (high + 1, num_negatives))
240
+ sampled_indices = np.random.randint(0, high, size=(high + 1, num_negatives))
241
+ # avoid sampling the same positive vector, but keep the distribution uniform
242
+ sampled_indices[sampled_indices >= feature_indices] += 1
243
+
244
+ # remap to actual indices
245
+ sampled_negative_indices[batch_idx][mask_time_indices[batch_idx]] = mapped_masked_indices[sampled_indices]
246
+
247
+ # correct for batch size
248
+ sampled_negative_indices[batch_idx] += batch_idx * sequence_length
249
+
250
+ return sampled_negative_indices
251
+
252
+
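An illustrative call to `_sample_negative_indices` with made-up shapes (used for contrastive-style sampling of negatives from the same utterance):

```python
import numpy as np
from transformers.models.wav2vec2_bert.modeling_wav2vec2_bert import _sample_negative_indices

np.random.seed(0)
negatives = _sample_negative_indices(features_shape=(2, 50), num_negatives=10)
print(negatives.shape)  # (2, 50, 10); indices in row b are offset by b * sequence_length
```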
253
+ # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerRotaryPositionalEmbedding with Wav2Vec2Conformer->Wav2Vec2Bert
254
+ class Wav2Vec2BertRotaryPositionalEmbedding(nn.Module):
255
+ """Rotary positional embedding
256
+ Reference: https://blog.eleuther.ai/rotary-embeddings/ Paper: https://arxiv.org/pdf/2104.09864.pdf
257
+ """
258
+
259
+ def __init__(self, config):
260
+ super().__init__()
261
+ dim = config.hidden_size // config.num_attention_heads
262
+ base = config.rotary_embedding_base
263
+
264
+ inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim))
265
+ # Ignore copy
266
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
267
+ self.cached_sequence_length = None
268
+ self.cached_rotary_positional_embedding = None
269
+
270
+ def forward(self, hidden_states):
271
+ sequence_length = hidden_states.shape[1]
272
+
273
+ if sequence_length == self.cached_sequence_length and self.cached_rotary_positional_embedding is not None:
274
+ return self.cached_rotary_positional_embedding
275
+
276
+ self.cached_sequence_length = sequence_length
277
+ # Embeddings are computed in the dtype of the inv_freq constant
278
+ time_stamps = torch.arange(sequence_length).type_as(self.inv_freq)
279
+ freqs = torch.einsum("i,j->ij", time_stamps, self.inv_freq)
280
+ embeddings = torch.cat((freqs, freqs), dim=-1)
281
+
282
+ cos_embeddings = embeddings.cos()[:, None, None, :]
283
+ sin_embeddings = embeddings.sin()[:, None, None, :]
284
+ # Computed embeddings are cast to the dtype of the hidden state inputs
285
+ self.cached_rotary_positional_embedding = torch.stack([cos_embeddings, sin_embeddings]).type_as(hidden_states)
286
+ return self.cached_rotary_positional_embedding
287
+
288
+
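A quick shape check of the rotary embedding cache built above (the config values are the defaults; the head dimension is hidden_size / num_attention_heads = 64):

```python
import torch
from transformers import Wav2Vec2BertConfig
from transformers.models.wav2vec2_bert.modeling_wav2vec2_bert import (
    Wav2Vec2BertRotaryPositionalEmbedding,
)

config = Wav2Vec2BertConfig(position_embeddings_type="rotary")
rotary = Wav2Vec2BertRotaryPositionalEmbedding(config)

hidden_states = torch.zeros(1, 20, config.hidden_size)
embeddings = rotary(hidden_states)
print(embeddings.shape)  # torch.Size([2, 20, 1, 1, 64]) -> stacked cos/sin tables
```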
289
+ # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerRelPositionalEmbedding with Wav2Vec2Conformer->Wav2Vec2Bert
290
+ class Wav2Vec2BertRelPositionalEmbedding(nn.Module):
291
+ """Relative positional encoding module."""
292
+
293
+ def __init__(self, config):
294
+ super().__init__()
295
+ self.max_len = config.max_source_positions
296
+ self.d_model = config.hidden_size
297
+ self.pe = None
298
+ self.extend_pe(torch.tensor(0.0).expand(1, self.max_len))
299
+
300
+ def extend_pe(self, x):
301
+ # Reset the positional encodings
302
+ if self.pe is not None:
303
+ # self.pe contains both positive and negative parts
304
+ # the length of self.pe is 2 * input_len - 1
305
+ if self.pe.size(1) >= x.size(1) * 2 - 1:
306
+ if self.pe.dtype != x.dtype or self.pe.device != x.device:
307
+ self.pe = self.pe.to(dtype=x.dtype, device=x.device)
308
+ return
309
+ # Suppose `i` is the position of query vector and `j` is the
310
+ # position of key vector. We use positive relative positions when keys
311
+ # are to the left (i>j) and negative relative positions otherwise (i<j).
312
+ pe_positive = torch.zeros(x.size(1), self.d_model)
313
+ pe_negative = torch.zeros(x.size(1), self.d_model)
314
+ position = torch.arange(0, x.size(1), dtype=torch.int64).float().unsqueeze(1)
315
+ div_term = torch.exp(
316
+ torch.arange(0, self.d_model, 2, dtype=torch.int64).float() * -(math.log(10000.0) / self.d_model)
317
+ )
318
+ pe_positive[:, 0::2] = torch.sin(position * div_term)
319
+ pe_positive[:, 1::2] = torch.cos(position * div_term)
320
+ pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
321
+ pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)
322
+
323
+ # Reverse the order of positive indices and concat both positive and
324
+ # negative indices. This is used to support the shifting trick
325
+ # as in https://arxiv.org/abs/1901.02860
326
+ pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
327
+ pe_negative = pe_negative[1:].unsqueeze(0)
328
+ pe = torch.cat([pe_positive, pe_negative], dim=1)
329
+ self.pe = pe.to(device=x.device, dtype=x.dtype)
330
+
331
+ def forward(self, hidden_states: torch.Tensor):
332
+ self.extend_pe(hidden_states)
333
+ start_idx = self.pe.size(1) // 2 - hidden_states.size(1) + 1
334
+ end_idx = self.pe.size(1) // 2 + hidden_states.size(1)
335
+ relative_position_embeddings = self.pe[:, start_idx:end_idx]
336
+
337
+ return relative_position_embeddings
338
+
339
+
340
+ class Wav2Vec2BertFeatureProjection(nn.Module):
341
+ def __init__(self, config):
342
+ super().__init__()
343
+ self.layer_norm = nn.LayerNorm(config.feature_projection_input_dim, eps=config.layer_norm_eps)
344
+ self.projection = nn.Linear(config.feature_projection_input_dim, config.hidden_size)
345
+ self.dropout = nn.Dropout(config.feat_proj_dropout)
346
+
347
+ def forward(self, hidden_states):
348
+ # non-projected hidden states are needed for quantization
349
+ norm_hidden_states = self.layer_norm(hidden_states)
350
+ hidden_states = self.projection(norm_hidden_states)
351
+ hidden_states = self.dropout(hidden_states)
352
+ return hidden_states, norm_hidden_states
353
+
354
+
355
+ class Wav2Vec2BertFeedForward(nn.Module):
356
+ def __init__(self, config, act_fn=None, hidden_size=None):
357
+ super().__init__()
358
+ act_fn = act_fn if act_fn is not None else config.hidden_act
359
+ hidden_size = hidden_size if hidden_size is not None else config.hidden_size
360
+ self.intermediate_dropout = nn.Dropout(config.activation_dropout)
361
+
362
+ self.intermediate_dense = nn.Linear(hidden_size, config.intermediate_size)
363
+ self.intermediate_act_fn = ACT2FN[act_fn] if isinstance(act_fn, str) else act_fn
364
+
365
+ self.output_dense = nn.Linear(config.intermediate_size, hidden_size)
366
+ self.output_dropout = nn.Dropout(config.hidden_dropout)
367
+
368
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward.forward
369
+ def forward(self, hidden_states):
370
+ hidden_states = self.intermediate_dense(hidden_states)
371
+ hidden_states = self.intermediate_act_fn(hidden_states)
372
+ hidden_states = self.intermediate_dropout(hidden_states)
373
+
374
+ hidden_states = self.output_dense(hidden_states)
375
+ hidden_states = self.output_dropout(hidden_states)
376
+ return hidden_states
377
+
378
+
379
+ class Wav2Vec2BertConvolutionModule(nn.Module):
380
+ """Convolution block used in the conformer block"""
381
+
382
+ def __init__(self, config):
383
+ super().__init__()
384
+ if (config.conv_depthwise_kernel_size - 1) % 2 == 1:
385
+ raise ValueError("`config.conv_depthwise_kernel_size` should be a odd number for 'SAME' padding")
386
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
387
+ self.pointwise_conv1 = nn.Conv1d(
388
+ config.hidden_size,
389
+ 2 * config.hidden_size,
390
+ kernel_size=1,
391
+ stride=1,
392
+ padding=0,
393
+ bias=False,
394
+ )
395
+ self.glu = nn.GLU(dim=1)
396
+ self.depthwise_conv = nn.Conv1d(
397
+ config.hidden_size,
398
+ config.hidden_size,
399
+ config.conv_depthwise_kernel_size,
400
+ stride=1,
401
+ padding=0,
402
+ groups=config.hidden_size,
403
+ bias=False,
404
+ )
405
+
406
+ self.depthwise_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
407
+ self.activation = ACT2FN[config.hidden_act]
408
+ self.pointwise_conv2 = nn.Conv1d(
409
+ config.hidden_size,
410
+ config.hidden_size,
411
+ kernel_size=1,
412
+ stride=1,
413
+ padding=0,
414
+ bias=False,
415
+ )
416
+ self.dropout = nn.Dropout(config.conformer_conv_dropout)
417
+
418
+ def forward(self, hidden_states, attention_mask=None):
419
+ hidden_states = self.layer_norm(hidden_states)
420
+
421
+ # Ensure that we do not leak padded positions in depthwise convolution if attention mask is passed.
422
+ # Put 0 where necessary
423
+ if attention_mask is not None:
424
+ hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0)
425
+
426
+ # exchange the temporal dimension and the feature dimension
427
+ hidden_states = hidden_states.transpose(1, 2)
428
+
429
+ # GLU mechanism
430
+ # => (batch, 2*channel, dim)
431
+ hidden_states = self.pointwise_conv1(hidden_states)
432
+ # => (batch, channel, dim)
433
+ hidden_states = self.glu(hidden_states)
434
+
435
+ # Pad the sequence entirely on the left because of causal convolution.
436
+ hidden_states = torch.nn.functional.pad(hidden_states, (self.depthwise_conv.kernel_size[0] - 1, 0))
437
+
438
+ # 1D Depthwise Conv
439
+ hidden_states = self.depthwise_conv(hidden_states)
440
+
441
+ hidden_states = self.depthwise_layer_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
442
+
443
+ hidden_states = self.activation(hidden_states)
444
+
445
+ hidden_states = self.pointwise_conv2(hidden_states)
446
+ hidden_states = self.dropout(hidden_states)
447
+ hidden_states = hidden_states.transpose(1, 2)
448
+ return hidden_states
449
+
450
+
451
+ class Wav2Vec2BertSelfAttention(nn.Module):
452
+ """Construct an Wav2Vec2BertSelfAttention object.
453
+ Can be enhanced with rotary or relative position embeddings.
454
+ """
455
+
456
+ def __init__(self, config, is_adapter_attention=False):
457
+ super().__init__()
458
+ hidden_size = config.hidden_size if not is_adapter_attention else config.output_hidden_size
459
+
460
+ self.head_size = hidden_size // config.num_attention_heads
461
+ self.num_heads = config.num_attention_heads
462
+ self.position_embeddings_type = config.position_embeddings_type if not is_adapter_attention else None
463
+
464
+ self.linear_q = nn.Linear(hidden_size, hidden_size)
465
+ self.linear_k = nn.Linear(hidden_size, hidden_size)
466
+ self.linear_v = nn.Linear(hidden_size, hidden_size)
467
+ self.linear_out = nn.Linear(hidden_size, hidden_size)
468
+
469
+ self.dropout = nn.Dropout(p=config.attention_dropout)
470
+
471
+ if self.position_embeddings_type == "relative":
472
+ # linear transformation for positional encoding
473
+ self.linear_pos = nn.Linear(hidden_size, hidden_size, bias=False)
474
+ # these two learnable biases are used in matrix c and matrix d
475
+ # as described in https://arxiv.org/abs/1901.02860 Section 3.3
476
+ self.pos_bias_u = nn.Parameter(torch.zeros(self.num_heads, self.head_size))
477
+ self.pos_bias_v = nn.Parameter(torch.zeros(self.num_heads, self.head_size))
478
+
479
+ if self.position_embeddings_type == "relative_key":
480
+ self.left_max_position_embeddings = config.left_max_position_embeddings
481
+ self.right_max_position_embeddings = config.right_max_position_embeddings
482
+ num_positions = self.left_max_position_embeddings + self.right_max_position_embeddings + 1
483
+ self.distance_embedding = nn.Embedding(num_positions, self.head_size)
484
+
485
+ def forward(
486
+ self,
487
+ hidden_states: torch.Tensor,
488
+ attention_mask: Optional[torch.Tensor] = None,
489
+ relative_position_embeddings: Optional[torch.Tensor] = None,
490
+ output_attentions: bool = False,
491
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
492
+ # self-attention mechanism
493
+ batch_size, sequence_length, hidden_size = hidden_states.size()
494
+
495
+ # make sure query/key states can be != value states
496
+ query_key_states = hidden_states
497
+ value_states = hidden_states
498
+
499
+ if self.position_embeddings_type == "rotary":
500
+ if relative_position_embeddings is None:
501
+ raise ValueError(
502
+ "`relative_position_embeddings` has to be defined when `self.position_embeddings_type == 'rotary'"
503
+ )
504
+ query_key_states = self._apply_rotary_embedding(query_key_states, relative_position_embeddings)
505
+
506
+ # project query_key_states and value_states
507
+ query = self.linear_q(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
508
+ key = self.linear_k(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
509
+ value = self.linear_v(value_states).view(batch_size, -1, self.num_heads, self.head_size)
510
+
511
+ # => (batch, head, time1, d_k)
512
+ query = query.transpose(1, 2)
513
+ key = key.transpose(1, 2)
514
+ value = value.transpose(1, 2)
515
+
516
+ if self.position_embeddings_type == "relative":
517
+ if relative_position_embeddings is None:
518
+ raise ValueError(
519
+ "`relative_position_embeddings` has to be defined when `self.position_embeddings_type =="
520
+ " 'relative'"
521
+ )
522
+ # apply relative_position_embeddings to qk scores
523
+ # as proposed in Transformer_XL: https://arxiv.org/abs/1901.02860
524
+ scores = self._apply_relative_embeddings(
525
+ query=query, key=key, relative_position_embeddings=relative_position_embeddings
526
+ )
527
+ else:
528
+ scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.head_size)
529
+
530
+ if self.position_embeddings_type == "relative_key":
531
+ query_length, key_length = query.shape[2], key.shape[2]
532
+
533
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
534
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
535
+ distance = position_ids_r - position_ids_l
536
+ distance = torch.clamp(distance, -self.left_max_position_embeddings, self.right_max_position_embeddings)
537
+
538
+ positional_embedding = self.distance_embedding(distance + self.left_max_position_embeddings)
539
+ positional_embedding = positional_embedding.to(dtype=query.dtype) # fp16 compatibility
540
+
541
+ relative_position_attn_weights = torch.einsum("bhld,lrd->bhlr", query, positional_embedding)
542
+ scores = scores + (relative_position_attn_weights / math.sqrt(self.head_size))
543
+
544
+ # apply attention_mask if necessary
545
+ if attention_mask is not None:
546
+ scores = scores + attention_mask
547
+
548
+ # => (batch, head, time1, time2)
549
+ probs = torch.softmax(scores, dim=-1)
550
+ probs = self.dropout(probs)
551
+
552
+ # => (batch, head, time1, d_k)
553
+ hidden_states = torch.matmul(probs, value)
554
+
555
+ # => (batch, time1, hidden_size)
556
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_size)
557
+ hidden_states = self.linear_out(hidden_states)
558
+
559
+ return hidden_states, probs
560
+
561
+ # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerSelfAttention._apply_rotary_embedding
562
+ def _apply_rotary_embedding(self, hidden_states, relative_position_embeddings):
563
+ batch_size, sequence_length, hidden_size = hidden_states.size()
564
+ hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads, self.head_size)
565
+
566
+ cos = relative_position_embeddings[0, :sequence_length, ...]
567
+ sin = relative_position_embeddings[1, :sequence_length, ...]
568
+
569
+ # rotate hidden_states with rotary embeddings
570
+ hidden_states = hidden_states.transpose(0, 1)
571
+ rotated_states_begin = hidden_states[..., : self.head_size // 2]
572
+ rotated_states_end = hidden_states[..., self.head_size // 2 :]
573
+ rotated_states = torch.cat((-rotated_states_end, rotated_states_begin), dim=rotated_states_begin.ndim - 1)
574
+ hidden_states = (hidden_states * cos) + (rotated_states * sin)
575
+ hidden_states = hidden_states.transpose(0, 1)
576
+
577
+ hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads * self.head_size)
578
+
579
+ return hidden_states
580
+
581
+ # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerSelfAttention._apply_relative_embeddings
582
+ def _apply_relative_embeddings(self, query, key, relative_position_embeddings):
583
+ # 1. project positional embeddings
584
+ # => (batch, head, 2*time1-1, d_k)
585
+ proj_relative_position_embeddings = self.linear_pos(relative_position_embeddings)
586
+ proj_relative_position_embeddings = proj_relative_position_embeddings.view(
587
+ relative_position_embeddings.size(0), -1, self.num_heads, self.head_size
588
+ )
589
+ proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(1, 2)
590
+ proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(2, 3)
591
+
592
+ # 2. Add bias to query
593
+ # => (batch, head, time1, d_k)
594
+ query = query.transpose(1, 2)
595
+ q_with_bias_u = (query + self.pos_bias_u).transpose(1, 2)
596
+ q_with_bias_v = (query + self.pos_bias_v).transpose(1, 2)
597
+
598
+ # 3. attention score: first compute matrix a and matrix c
599
+ # as described in https://arxiv.org/abs/1901.02860 Section 3.3
600
+ # => (batch, head, time1, time2)
601
+ scores_ac = torch.matmul(q_with_bias_u, key.transpose(-2, -1))
602
+
603
+ # 4. then compute matrix b and matrix d
604
+ # => (batch, head, time1, 2*time1-1)
605
+ scores_bd = torch.matmul(q_with_bias_v, proj_relative_position_embeddings)
606
+
607
+ # 5. shift matrix b and matrix d
608
+ zero_pad = torch.zeros((*scores_bd.size()[:3], 1), device=scores_bd.device, dtype=scores_bd.dtype)
609
+ scores_bd_padded = torch.cat([zero_pad, scores_bd], dim=-1)
610
+ scores_bd_padded_shape = scores_bd.size()[:2] + (scores_bd.shape[3] + 1, scores_bd.shape[2])
611
+ scores_bd_padded = scores_bd_padded.view(*scores_bd_padded_shape)
612
+ scores_bd = scores_bd_padded[:, :, 1:].view_as(scores_bd)
613
+ scores_bd = scores_bd[:, :, :, : scores_bd.size(-1) // 2 + 1]
614
+
615
+ # 6. sum matrices
616
+ # => (batch, head, time1, time2)
617
+ scores = (scores_ac + scores_bd) / math.sqrt(self.head_size)
618
+
619
+ return scores
620
+
621
+
622
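# A small sketch, with illustrative window sizes, of how the "relative_key" branch above builds its
# distance indices: pairwise key-minus-query offsets are clamped to the configured window and shifted
# to be non-negative before the nn.Embedding lookup.
import torch

left_max, right_max = 3, 2  # stand-ins for config.left/right_max_position_embeddings
query_length = key_length = 6

position_ids_l = torch.arange(query_length).view(-1, 1)
position_ids_r = torch.arange(key_length).view(1, -1)
distance = position_ids_r - position_ids_l                # key index minus query index
distance = torch.clamp(distance, -left_max, right_max)    # limit the relative window
indices = distance + left_max                             # shift into [0, left_max + right_max]
print(indices)  # valid rows for an nn.Embedding(left_max + right_max + 1, head_size)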
+ class Wav2Vec2BertEncoderLayer(nn.Module):
623
+ """Conformer block based on https://arxiv.org/abs/2005.08100."""
624
+
625
+ def __init__(self, config):
626
+ super().__init__()
627
+ embed_dim = config.hidden_size
628
+ dropout = config.attention_dropout
629
+
630
+ # Feed-forward 1
631
+ self.ffn1_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
632
+ self.ffn1 = Wav2Vec2BertFeedForward(config)
633
+
634
+ # Self-Attention
635
+ self.self_attn_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
636
+ self.self_attn_dropout = nn.Dropout(dropout)
637
+ self.self_attn = Wav2Vec2BertSelfAttention(config)
638
+
639
+ # Conformer Convolution
640
+ self.conv_module = Wav2Vec2BertConvolutionModule(config)
641
+
642
+ # Feed-forward 2
643
+ self.ffn2_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
644
+ self.ffn2 = Wav2Vec2BertFeedForward(config)
645
+ self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
646
+
647
+ def forward(
648
+ self,
649
+ hidden_states,
650
+ attention_mask: Optional[torch.Tensor] = None,
651
+ relative_position_embeddings: Optional[torch.Tensor] = None,
652
+ output_attentions: bool = False,
653
+ conv_attention_mask: Optional[torch.Tensor] = None,
654
+ ):
655
+ hidden_states = hidden_states
656
+
657
+ # 1. Feed-Forward 1 layer
658
+ residual = hidden_states
659
+ hidden_states = self.ffn1_layer_norm(hidden_states)
660
+ hidden_states = self.ffn1(hidden_states)
661
+ hidden_states = hidden_states * 0.5 + residual
662
+ residual = hidden_states
663
+
664
+ # 2. Self-Attention layer
665
+ hidden_states = self.self_attn_layer_norm(hidden_states)
666
+ hidden_states, attn_weights = self.self_attn(
667
+ hidden_states=hidden_states,
668
+ attention_mask=attention_mask,
669
+ relative_position_embeddings=relative_position_embeddings,
670
+ output_attentions=output_attentions,
671
+ )
672
+ hidden_states = self.self_attn_dropout(hidden_states)
673
+ hidden_states = hidden_states + residual
674
+
675
+ # 3. Convolutional Layer
676
+ residual = hidden_states
677
+ hidden_states = self.conv_module(hidden_states, attention_mask=conv_attention_mask)
678
+ hidden_states = residual + hidden_states
679
+
680
+ # 4. Feed-Forward 2 Layer
681
+ residual = hidden_states
682
+ hidden_states = self.ffn2_layer_norm(hidden_states)
683
+ hidden_states = self.ffn2(hidden_states)
684
+ hidden_states = hidden_states * 0.5 + residual
685
+ hidden_states = self.final_layer_norm(hidden_states)
686
+
687
+ return hidden_states, attn_weights
688
+
689
+
690
+ class Wav2Vec2BertEncoder(nn.Module):
691
+ def __init__(self, config):
692
+ super().__init__()
693
+ self.config = config
694
+
695
+ if config.position_embeddings_type == "relative":
696
+ self.embed_positions = Wav2Vec2BertRelPositionalEmbedding(config)
697
+ elif config.position_embeddings_type == "rotary":
698
+ self.embed_positions = Wav2Vec2BertRotaryPositionalEmbedding(config)
699
+ else:
700
+ self.embed_positions = None
701
+
702
+ self.dropout = nn.Dropout(config.hidden_dropout)
703
+ self.layers = nn.ModuleList([Wav2Vec2BertEncoderLayer(config) for _ in range(config.num_hidden_layers)])
704
+ self.gradient_checkpointing = False
705
+
706
+ def forward(
707
+ self,
708
+ hidden_states,
709
+ attention_mask=None,
710
+ output_attentions=False,
711
+ output_hidden_states=False,
712
+ return_dict=True,
713
+ ):
714
+ all_hidden_states = () if output_hidden_states else None
715
+ all_self_attentions = () if output_attentions else None
716
+
717
+ conv_attention_mask = attention_mask
718
+ if attention_mask is not None:
719
+ # make sure padded tokens output 0
720
+ hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0)
721
+
722
+ # extend attention_mask
723
+ attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
724
+ attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
725
+ attention_mask = attention_mask.expand(
726
+ attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
727
+ )
728
+
729
+ hidden_states = self.dropout(hidden_states)
730
+
731
+ if self.embed_positions is not None:
732
+ relative_position_embeddings = self.embed_positions(hidden_states)
733
+ else:
734
+ relative_position_embeddings = None
735
+
736
+ deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
737
+
738
+ for i, layer in enumerate(self.layers):
739
+ if output_hidden_states:
740
+ all_hidden_states = all_hidden_states + (hidden_states,)
741
+
742
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
743
+ dropout_probability = torch.rand([])
744
+
745
+ skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
746
+ if not skip_the_layer or deepspeed_zero3_is_enabled:
747
+ # under deepspeed zero3 all gpus must run in sync
748
+ if self.gradient_checkpointing and self.training:
749
+ layer_outputs = self._gradient_checkpointing_func(
750
+ layer.__call__,
751
+ hidden_states,
752
+ attention_mask,
753
+ relative_position_embeddings,
754
+ output_attentions,
755
+ conv_attention_mask,
756
+ )
757
+ else:
758
+ layer_outputs = layer(
759
+ hidden_states,
760
+ attention_mask=attention_mask,
761
+ relative_position_embeddings=relative_position_embeddings,
762
+ output_attentions=output_attentions,
763
+ conv_attention_mask=conv_attention_mask,
764
+ )
765
+ hidden_states = layer_outputs[0]
766
+
767
+ if skip_the_layer:
768
+ layer_outputs = (None, None)
769
+
770
+ if output_attentions:
771
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
772
+
773
+ if output_hidden_states:
774
+ all_hidden_states = all_hidden_states + (hidden_states,)
775
+
776
+ if not return_dict:
777
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
778
+ return BaseModelOutput(
779
+ last_hidden_state=hidden_states,
780
+ hidden_states=all_hidden_states,
781
+ attentions=all_self_attentions,
782
+ )
783
+
784
+
785
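# A minimal sketch of the mask expansion done at the top of Wav2Vec2BertEncoder.forward: a 0/1 padding
# mask becomes an additive bias that is 0 for valid positions and a very large negative number for
# padded ones, broadcast to (batch, 1, seq_len, seq_len) so that softmax ignores padded keys.
import torch

attention_mask = torch.tensor([[1, 1, 1, 0, 0]])  # one sequence, last two frames are padding
dtype = torch.float32

bias = 1.0 - attention_mask[:, None, None, :].to(dtype)
bias = bias * torch.finfo(dtype).min
bias = bias.expand(bias.shape[0], 1, bias.shape[-1], bias.shape[-1])
print(bias.shape)     # torch.Size([1, 1, 5, 5])
print(bias[0, 0, 0])  # [0., 0., 0., -3.4e38, -3.4e38]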
+ class Wav2Vec2BertAdapter(nn.Module):
786
+ def __init__(self, config):
787
+ super().__init__()
788
+ # feature dim might need to be down-projected
789
+ if config.output_hidden_size != config.hidden_size:
790
+ self.proj = nn.Linear(config.hidden_size, config.output_hidden_size)
791
+ self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size, eps=config.layer_norm_eps)
792
+ else:
793
+ self.proj = self.proj_layer_norm = None
794
+ self.layers = nn.ModuleList(Wav2Vec2BertAdapterLayer(config) for _ in range(config.num_adapter_layers))
795
+ self.layerdrop = config.layerdrop
796
+
797
+ self.kernel_size = config.adapter_kernel_size
798
+ self.stride = config.adapter_stride
799
+
800
+ def _compute_sub_sample_lengths_from_attention_mask(self, seq_lens):
801
+ if seq_lens is None:
802
+ return seq_lens
803
+ pad = self.kernel_size // 2
804
+ seq_lens = ((seq_lens + 2 * pad - self.kernel_size) / self.stride) + 1
805
+ return seq_lens.floor()
806
+
807
+ def forward(self, hidden_states, attention_mask=None):
808
+ # down project hidden_states if necessary
809
+ if self.proj is not None and self.proj_layer_norm is not None:
810
+ hidden_states = self.proj(hidden_states)
811
+ hidden_states = self.proj_layer_norm(hidden_states)
812
+
813
+ sub_sampled_lengths = None
814
+ if attention_mask is not None:
815
+ sub_sampled_lengths = (attention_mask.size(1) - (1 - attention_mask.int()).sum(1)).to(hidden_states.device)
816
+
817
+ for layer in self.layers:
818
+ layerdrop_prob = torch.rand([])
819
+ sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(sub_sampled_lengths)
820
+ if not self.training or (layerdrop_prob > self.layerdrop):
821
+ hidden_states = layer(
822
+ hidden_states, attention_mask=attention_mask, sub_sampled_lengths=sub_sampled_lengths
823
+ )
824
+
825
+ return hidden_states
826
+
827
+
828
+ class Wav2Vec2BertAdapterLayer(nn.Module):
829
+ def __init__(self, config):
830
+ super().__init__()
831
+ embed_dim = config.output_hidden_size
832
+ dropout = config.conformer_conv_dropout
833
+
834
+ self.kernel_size = config.adapter_kernel_size
835
+ self.stride = config.adapter_stride
836
+
837
+ # 1. residual convolution
838
+ self.residual_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
839
+ self.residual_conv = nn.Conv1d(
840
+ embed_dim,
841
+ 2 * embed_dim,
842
+ self.kernel_size,
843
+ stride=self.stride,
844
+ padding=self.stride // 2,
845
+ )
846
+ self.activation = nn.GLU(dim=1)
847
+
848
+ # Self-Attention
849
+ self.self_attn_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
850
+ self.self_attn_conv = nn.Conv1d(
851
+ embed_dim,
852
+ 2 * embed_dim,
853
+ self.kernel_size,
854
+ stride=self.stride,
855
+ padding=self.stride // 2,
856
+ )
857
+ self.self_attn = Wav2Vec2BertSelfAttention(config, is_adapter_attention=True)
858
+ self.self_attn_dropout = nn.Dropout(dropout)
859
+
860
+ # Feed-forward
861
+ self.ffn_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
862
+ self.ffn = Wav2Vec2BertFeedForward(config, act_fn=config.adapter_act, hidden_size=embed_dim)
863
+
864
+ def forward(
865
+ self,
866
+ hidden_states,
867
+ attention_mask: Optional[torch.Tensor] = None,
868
+ output_attentions: bool = False,
869
+ sub_sampled_lengths: Optional[torch.Tensor] = None,
870
+ ):
871
+ residual = self.residual_layer_norm(hidden_states)
872
+
873
+ # Apply pooling to the residual to match the sequence length of the
874
+ # multi-head attention output.
875
+ # (batch, seq_len, feature_dim) -> (batch, feature_dim, seq_len)
876
+ residual = residual.transpose(1, 2)
877
+ residual = self.residual_conv(residual)
878
+ residual = self.activation(residual)
879
+ # (batch, feature_dim, seq_len) -> (batch, seq_len, feature_dim)
880
+ residual = residual.transpose(1, 2)
881
+
882
+ hidden_states = self.self_attn_layer_norm(hidden_states)
883
+ # Apply pooling before feeding to the multihead-attention layer.
884
+ # (batch, seq_len, feature_dim) -> (batch, feature_dim, seq_len)
885
+ hidden_states = hidden_states.transpose(1, 2)
886
+ hidden_states = self.self_attn_conv(hidden_states)
887
+ hidden_states = self.activation(hidden_states)
888
+ # (batch, feature_dim, seq_len) -> (batch, seq_len, feature_dim)
889
+ hidden_states = hidden_states.transpose(1, 2)
890
+
891
+ if attention_mask is not None:
892
+ attention_mask = _compute_new_attention_mask(hidden_states=hidden_states, seq_lens=sub_sampled_lengths)
893
+ attention_mask = _prepare_4d_attention_mask(
894
+ attention_mask,
895
+ hidden_states.dtype,
896
+ )
897
+
898
+ # The rest of the computation is identical to a vanilla Transformer
899
+ # encoder layer.
900
+ hidden_states, attn_weights = self.self_attn(
901
+ hidden_states,
902
+ attention_mask=attention_mask,
903
+ output_attentions=output_attentions,
904
+ )
905
+ hidden_states = self.self_attn_dropout(hidden_states)
906
+ hidden_states = hidden_states + residual
907
+
908
+ residual = hidden_states
909
+
910
+ hidden_states = self.ffn_layer_norm(hidden_states)
911
+ hidden_states = self.ffn(hidden_states) + residual
912
+
913
+ return hidden_states
914
+
915
+
916
+ # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerPreTrainedModel with Wav2Vec2Conformer->Wav2Vec2Bert,wav2vec2_conformer->wav2vec2_bert, input_values->input_features
917
+ class Wav2Vec2BertPreTrainedModel(PreTrainedModel):
918
+ """
919
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
920
+ models.
921
+ """
922
+
923
+ config_class = Wav2Vec2BertConfig
924
+ base_model_prefix = "wav2vec2_bert"
925
+ main_input_name = "input_features"
926
+ supports_gradient_checkpointing = True
927
+
928
+ # Ignore copy
929
+ def _init_weights(self, module):
930
+ """Initialize the weights"""
931
+ if isinstance(module, Wav2Vec2BertSelfAttention):
932
+ if hasattr(module, "pos_bias_u"):
933
+ nn.init.xavier_uniform_(module.pos_bias_u)
934
+ if hasattr(module, "pos_bias_v"):
935
+ nn.init.xavier_uniform_(module.pos_bias_v)
936
+ elif isinstance(module, Wav2Vec2BertFeatureProjection):
937
+ k = math.sqrt(1 / module.projection.in_features)
938
+ nn.init.uniform_(module.projection.weight, a=-k, b=k)
939
+ nn.init.uniform_(module.projection.bias, a=-k, b=k)
940
+ elif isinstance(module, nn.Linear):
941
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
942
+
943
+ if module.bias is not None:
944
+ module.bias.data.zero_()
945
+ elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
946
+ module.bias.data.zero_()
947
+ module.weight.data.fill_(1.0)
948
+ elif isinstance(module, nn.Conv1d):
949
+ nn.init.kaiming_normal_(module.weight)
950
+
951
+ if module.bias is not None:
952
+ k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
953
+ nn.init.uniform_(module.bias, a=-k, b=k)
954
+
955
+ # Ignore copy
956
+ def _get_feat_extract_output_lengths(
957
+ self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool] = None
958
+ ):
959
+ """
960
+ Computes the output length of the convolutional layers
961
+ """
962
+
963
+ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter
964
+
965
+ def _conv_out_length(input_length, kernel_size, stride, padding):
966
+ # 1D convolutional layer output length formula taken
967
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
968
+ return torch.div(input_length + 2 * padding - kernel_size, stride, rounding_mode="floor") + 1
969
+
970
+ if add_adapter:
971
+ padding = self.config.adapter_kernel_size // 2
972
+ for _ in range(self.config.num_adapter_layers):
973
+ input_lengths = _conv_out_length(
974
+ input_lengths, self.config.adapter_kernel_size, self.config.adapter_stride, padding
975
+ )
976
+
977
+ return input_lengths
978
+
979
+ def _get_feature_vector_attention_mask(
980
+ self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None
981
+ ):
982
+ # Effectively attention_mask.sum(-1), but not inplace to be able to run
983
+ # in inference mode.
984
+ non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
985
+
986
+ output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter)
987
+ output_lengths = output_lengths.to(torch.long)
988
+
989
+ batch_size = attention_mask.shape[0]
990
+
991
+ attention_mask = torch.zeros(
992
+ (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
993
+ )
994
+ # these two operations make sure that all values before the output length indices are attended to
995
+ attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
996
+ attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
997
+ return attention_mask
998
+
999
+
1000
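# A short sketch of the two helpers above, with illustrative (non-default) kernel size and stride:
# the standard Conv1d length formula applied per adapter layer, and the flip/cumsum/flip trick that
# turns output lengths back into a boolean attention mask.
import torch

def conv_out_length(length, kernel_size, stride, padding):
    # same formula as in _get_feat_extract_output_lengths
    return torch.div(length + 2 * padding - kernel_size, stride, rounding_mode="floor") + 1

lengths = torch.tensor([10, 7])
kernel_size, stride = 3, 2  # illustrative adapter settings, not the model defaults
out_lengths = conv_out_length(lengths, kernel_size, stride, padding=kernel_size // 2)
print(out_lengths)  # tensor([5, 4])

# rebuild a boolean mask from the output lengths, as in _get_feature_vector_attention_mask
feature_len = int(out_lengths.max())
mask = torch.zeros(len(out_lengths), feature_len, dtype=torch.long)
mask[torch.arange(len(out_lengths)), out_lengths - 1] = 1
mask = mask.flip([-1]).cumsum(-1).flip([-1]).bool()
print(mask)  # row 0: first 5 positions True; row 1: first 4 True, last False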
+ WAV2VEC2_BERT_START_DOCSTRING = r"""
1001
+ Wav2Vec2Bert was proposed in [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech
1002
+ Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael
1003
+ Auli.
1004
+
1005
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
1006
+ library implements for all its models (such as downloading or saving, etc.).
1007
+
1008
+ This model is a PyTorch [nn.Module](https://pytorch.org/docs/stable/nn.html#nn.Module) sub-class. Use it as a
1009
+ regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.
1010
+
1011
+ Parameters:
1012
+ config ([`Wav2Vec2BertConfig`]): Model configuration class with all the parameters of the model.
1013
+ Initializing with a config file does not load the weights associated with the model, only the
1014
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1015
+ """
1016
+
1017
+
1018
+ WAV2VEC2_BERT_INPUTS_DOCSTRING = r"""
1019
+ Args:
1020
+ input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
1021
+ Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
1022
+ into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install
1023
+ soundfile`). To prepare the array into `input_features`, the [`AutoProcessor`] should be used for padding and
1024
+ conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2BertProcessor.__call__`] for details.
1025
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1026
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
1027
+ 1]`:
1028
+
1029
+ - 1 for tokens that are **not masked**,
1030
+ - 0 for tokens that are **masked**.
1031
+
1032
+ [What are attention masks?](../glossary#attention-mask)
1033
+ output_attentions (`bool`, *optional*):
1034
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1035
+ tensors for more detail.
1036
+ output_hidden_states (`bool`, *optional*):
1037
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1038
+ more detail.
1039
+ return_dict (`bool`, *optional*):
1040
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1041
+ """
1042
+
1043
+
1044
+ @add_start_docstrings(
1045
+ "The bare Wav2Vec2Bert Model transformer outputting raw hidden-states without any specific head on top.",
1046
+ WAV2VEC2_BERT_START_DOCSTRING,
1047
+ )
1048
+ class Wav2Vec2BertModel(Wav2Vec2BertPreTrainedModel):
1049
+ def __init__(self, config: Wav2Vec2BertConfig):
1050
+ super().__init__(config)
1051
+ self.config = config
1052
+ self.feature_projection = Wav2Vec2BertFeatureProjection(config)
1053
+
1054
+ # model only needs masking vector if mask prob is > 0.0
1055
+ if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
1056
+ self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())
1057
+
1058
+ self.encoder = Wav2Vec2BertEncoder(config)
1059
+
1060
+ self.adapter = Wav2Vec2BertAdapter(config) if config.add_adapter else None
1061
+
1062
+ self.intermediate_ffn = None
1063
+ if config.use_intermediate_ffn_before_adapter:
1064
+ self.intermediate_ffn = Wav2Vec2BertFeedForward(config, act_fn="relu")
1065
+
1066
+ # Initialize weights and apply final processing
1067
+ self.post_init()
1068
+
1069
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
1070
+ def _mask_hidden_states(
1071
+ self,
1072
+ hidden_states: torch.FloatTensor,
1073
+ mask_time_indices: Optional[torch.FloatTensor] = None,
1074
+ attention_mask: Optional[torch.LongTensor] = None,
1075
+ ):
1076
+ """
1077
+ Masks extracted features along time axis and/or along feature axis according to
1078
+ [SpecAugment](https://arxiv.org/abs/1904.08779).
1079
+ """
1080
+
1081
+ # `config.apply_spec_augment` can set masking to False
1082
+ if not getattr(self.config, "apply_spec_augment", True):
1083
+ return hidden_states
1084
+
1085
+ # generate indices & apply SpecAugment along time axis
1086
+ batch_size, sequence_length, hidden_size = hidden_states.size()
1087
+
1088
+ if mask_time_indices is not None:
1089
+ # apply SpecAugment along time axis with given mask_time_indices
1090
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
1091
+ elif self.config.mask_time_prob > 0 and self.training:
1092
+ mask_time_indices = _compute_mask_indices(
1093
+ (batch_size, sequence_length),
1094
+ mask_prob=self.config.mask_time_prob,
1095
+ mask_length=self.config.mask_time_length,
1096
+ attention_mask=attention_mask,
1097
+ min_masks=self.config.mask_time_min_masks,
1098
+ )
1099
+ mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
1100
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
1101
+
1102
+ if self.config.mask_feature_prob > 0 and self.training:
1103
+ # generate indices & apply SpecAugment along feature axis
1104
+ mask_feature_indices = _compute_mask_indices(
1105
+ (batch_size, hidden_size),
1106
+ mask_prob=self.config.mask_feature_prob,
1107
+ mask_length=self.config.mask_feature_length,
1108
+ min_masks=self.config.mask_feature_min_masks,
1109
+ )
1110
+ mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
1111
+ mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
1112
+ hidden_states[mask_feature_indices] = 0
1113
+
1114
+ return hidden_states
1115
+
1116
+ @add_start_docstrings_to_model_forward(WAV2VEC2_BERT_INPUTS_DOCSTRING)
1117
+ @add_code_sample_docstrings(
1118
+ checkpoint=_PRETRAINED_CHECKPOINT_FOR_DOC,
1119
+ output_type=Wav2Vec2BaseModelOutput,
1120
+ config_class=_CONFIG_FOR_DOC,
1121
+ modality="audio",
1122
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
1123
+ )
1124
+ def forward(
1125
+ self,
1126
+ input_features: Optional[torch.Tensor],
1127
+ attention_mask: Optional[torch.Tensor] = None,
1128
+ mask_time_indices: Optional[torch.FloatTensor] = None,
1129
+ output_attentions: Optional[bool] = None,
1130
+ output_hidden_states: Optional[bool] = None,
1131
+ return_dict: Optional[bool] = None,
1132
+ ) -> Union[Tuple, Wav2Vec2BaseModelOutput]:
1133
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1134
+ output_hidden_states = (
1135
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1136
+ )
1137
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1138
+
1139
+ hidden_states, extract_features = self.feature_projection(input_features)
1140
+ hidden_states = self._mask_hidden_states(
1141
+ hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
1142
+ )
1143
+
1144
+ encoder_outputs = self.encoder(
1145
+ hidden_states,
1146
+ attention_mask=attention_mask,
1147
+ output_attentions=output_attentions,
1148
+ output_hidden_states=output_hidden_states,
1149
+ return_dict=return_dict,
1150
+ )
1151
+
1152
+ hidden_states = encoder_outputs[0]
1153
+
1154
+ if self.intermediate_ffn:
1155
+ expanded_hidden_states = self.intermediate_ffn(hidden_states)
1156
+ hidden_states = hidden_states + 0.5 * expanded_hidden_states
1157
+
1158
+ if self.adapter is not None:
1159
+ hidden_states = self.adapter(hidden_states, attention_mask=attention_mask)
1160
+
1161
+ if not return_dict:
1162
+ return (hidden_states, extract_features) + encoder_outputs[1:]
1163
+
1164
+ return Wav2Vec2BaseModelOutput(
1165
+ last_hidden_state=hidden_states,
1166
+ extract_features=extract_features,
1167
+ hidden_states=encoder_outputs.hidden_states,
1168
+ attentions=encoder_outputs.attentions,
1169
+ )
1170
+
1171
+
1172
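# A hedged usage sketch for the bare Wav2Vec2BertModel defined above. The "facebook/w2v-bert-2.0"
# checkpoint name and the AutoProcessor call pattern are assumptions borrowed from typical
# Wav2Vec2-BERT examples, not something this diff guarantees.
import numpy as np
import torch
from transformers import AutoProcessor, Wav2Vec2BertModel

checkpoint = "facebook/w2v-bert-2.0"  # assumed checkpoint name
processor = AutoProcessor.from_pretrained(checkpoint)
model = Wav2Vec2BertModel.from_pretrained(checkpoint)

speech = np.random.randn(16000).astype(np.float32)  # 1 second of dummy 16 kHz audio
inputs = processor(speech, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, num_frames, hidden_size)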
+ @add_start_docstrings(
1173
+ """Wav2Vec2Bert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
1174
+ WAV2VEC2_BERT_START_DOCSTRING,
1175
+ )
1176
+ class Wav2Vec2BertForCTC(Wav2Vec2BertPreTrainedModel):
1177
+ # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForCTC.__init__ with Wav2Vec2Conformer->Wav2Vec2Bert,WAV2VEC2_CONFORMER->WAV2VEC2_BERT,wav2vec2_conformer->wav2vec2_bert
1178
+ def __init__(self, config, target_lang: Optional[str] = None):
1179
+ super().__init__(config)
1180
+
1181
+ self.wav2vec2_bert = Wav2Vec2BertModel(config)
1182
+ self.dropout = nn.Dropout(config.final_dropout)
1183
+
1184
+ self.target_lang = target_lang
1185
+
1186
+ if config.vocab_size is None:
1187
+ raise ValueError(
1188
+ f"You are trying to instantiate {self.__class__} with a configuration that "
1189
+ "does not define the vocabulary size of the language model head. Please "
1190
+ "instantiate the model as follows: `Wav2Vec2BertForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
1191
+ "or define `vocab_size` of your model's configuration."
1192
+ )
1193
+ output_hidden_size = (
1194
+ config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
1195
+ )
1196
+ self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
1197
+
1198
+ # Initialize weights and apply final processing
1199
+ self.post_init()
1200
+
1201
+ @add_start_docstrings_to_model_forward(WAV2VEC2_BERT_INPUTS_DOCSTRING)
1202
+ @add_code_sample_docstrings(
1203
+ checkpoint=_PRETRAINED_CHECKPOINT_FOR_DOC,
1204
+ output_type=CausalLMOutput,
1205
+ config_class=_CONFIG_FOR_DOC,
1206
+ expected_output=_CTC_EXPECTED_OUTPUT,
1207
+ expected_loss=_CTC_EXPECTED_LOSS,
1208
+ )
1209
+ def forward(
1210
+ self,
1211
+ input_features: Optional[torch.Tensor],
1212
+ attention_mask: Optional[torch.Tensor] = None,
1213
+ output_attentions: Optional[bool] = None,
1214
+ output_hidden_states: Optional[bool] = None,
1215
+ return_dict: Optional[bool] = None,
1216
+ labels: Optional[torch.Tensor] = None,
1217
+ ) -> Union[Tuple, CausalLMOutput]:
1218
+ r"""
1219
+ labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
1220
+ Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
1221
+ the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
1222
+ All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
1223
+ config.vocab_size - 1]`.
1224
+ """
1225
+
1226
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1227
+
1228
+ outputs = self.wav2vec2_bert(
1229
+ input_features,
1230
+ attention_mask=attention_mask,
1231
+ output_attentions=output_attentions,
1232
+ output_hidden_states=output_hidden_states,
1233
+ return_dict=return_dict,
1234
+ )
1235
+
1236
+ hidden_states = outputs[0]
1237
+ hidden_states = self.dropout(hidden_states)
1238
+
1239
+ logits = self.lm_head(hidden_states)
1240
+
1241
+ loss = None
1242
+ if labels is not None:
1243
+ if labels.max() >= self.config.vocab_size:
1244
+ raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
1245
+
1246
+ # retrieve loss input_lengths from attention_mask
1247
+ attention_mask = (
1248
+ attention_mask
1249
+ if attention_mask is not None
1250
+ else torch.ones(input_features.shape[:2], device=input_features.device, dtype=torch.long)
1251
+ )
1252
+ input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum([-1])).to(torch.long)
1253
+
1254
+ # assuming that padded tokens are filled with -100
1255
+ # when not being attended to
1256
+ labels_mask = labels >= 0
1257
+ target_lengths = labels_mask.sum(-1)
1258
+ flattened_targets = labels.masked_select(labels_mask)
1259
+
1260
+ # ctc_loss doesn't support fp16
1261
+ log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
1262
+
1263
+ with torch.backends.cudnn.flags(enabled=False):
1264
+ loss = nn.functional.ctc_loss(
1265
+ log_probs,
1266
+ flattened_targets,
1267
+ input_lengths,
1268
+ target_lengths,
1269
+ blank=self.config.pad_token_id,
1270
+ reduction=self.config.ctc_loss_reduction,
1271
+ zero_infinity=self.config.ctc_zero_infinity,
1272
+ )
1273
+
1274
+ if not return_dict:
1275
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
1276
+ return ((loss,) + output) if loss is not None else output
1277
+
1278
+ return CausalLMOutput(
1279
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
1280
+ )
1281
+
1282
+
1283
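# A self-contained sketch of the CTC loss bookkeeping in the forward above: labels padded with -100
# are masked out, per-sample target lengths are recovered, and nn.functional.ctc_loss consumes
# time-major log-probabilities. Shapes, lengths and the blank id are toy values.
import torch
import torch.nn as nn

batch, time, vocab_size, pad_token_id = 2, 12, 10, 0
logits = torch.randn(batch, time, vocab_size)

labels = torch.tensor([[4, 2, 7, -100, -100],
                       [3, 3, 5, 6, 1]])         # padded with -100, as assumed by the model
labels_mask = labels >= 0
target_lengths = labels_mask.sum(-1)             # tensor([3, 5])
flattened_targets = labels.masked_select(labels_mask)

input_lengths = torch.tensor([12, 10])           # e.g. from _get_feat_extract_output_lengths
log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)

loss = nn.functional.ctc_loss(
    log_probs, flattened_targets, input_lengths, target_lengths,
    blank=pad_token_id, reduction="sum", zero_infinity=True,
)
print(loss)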
+ @add_start_docstrings(
1284
+ """
1285
+ Wav2Vec2Bert Model with a sequence classification head on top (a linear layer over the pooled output) for
1286
+ tasks like SUPERB Keyword Spotting.
1287
+ """,
1288
+ WAV2VEC2_BERT_START_DOCSTRING,
1289
+ )
1290
+ class Wav2Vec2BertForSequenceClassification(Wav2Vec2BertPreTrainedModel):
1291
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.__init__ with Wav2Vec2->Wav2Vec2Bert,wav2vec2->wav2vec2_bert
1292
+ def __init__(self, config):
1293
+ super().__init__(config)
1294
+
1295
+ if hasattr(config, "add_adapter") and config.add_adapter:
1296
+ raise ValueError(
1297
+ "Sequence classification does not support the use of Wav2Vec2Bert adapters (config.add_adapter=True)"
1298
+ )
1299
+ self.wav2vec2_bert = Wav2Vec2BertModel(config)
1300
+ num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
1301
+ if config.use_weighted_layer_sum:
1302
+ self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
1303
+ self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
1304
+ self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
1305
+
1306
+ # Initialize weights and apply final processing
1307
+ self.post_init()
1308
+
1309
+ def freeze_base_model(self):
1310
+ """
1311
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
1312
+ be updated during training. Only the classification head will be updated.
1313
+ """
1314
+ for param in self.wav2vec2_bert.parameters():
1315
+ param.requires_grad = False
1316
+
1317
+ @add_start_docstrings_to_model_forward(WAV2VEC2_BERT_INPUTS_DOCSTRING)
1318
+ @add_code_sample_docstrings(
1319
+ checkpoint=_BASE_CHECKPOINT_FOR_DOC,
1320
+ output_type=SequenceClassifierOutput,
1321
+ config_class=_CONFIG_FOR_DOC,
1322
+ modality="audio",
1323
+ )
1324
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.forward with Wav2Vec2->Wav2Vec2Bert,wav2vec2->wav2vec2_bert,WAV_2_VEC_2->WAV2VEC2_BERT, input_values->input_features
1325
+ def forward(
1326
+ self,
1327
+ input_features: Optional[torch.Tensor],
1328
+ attention_mask: Optional[torch.Tensor] = None,
1329
+ output_attentions: Optional[bool] = None,
1330
+ output_hidden_states: Optional[bool] = None,
1331
+ return_dict: Optional[bool] = None,
1332
+ labels: Optional[torch.Tensor] = None,
1333
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1334
+ r"""
1335
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1336
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1337
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1338
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1339
+ """
1340
+
1341
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1342
+ output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
1343
+
1344
+ outputs = self.wav2vec2_bert(
1345
+ input_features,
1346
+ attention_mask=attention_mask,
1347
+ output_attentions=output_attentions,
1348
+ output_hidden_states=output_hidden_states,
1349
+ return_dict=return_dict,
1350
+ )
1351
+
1352
+ if self.config.use_weighted_layer_sum:
1353
+ hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
1354
+ hidden_states = torch.stack(hidden_states, dim=1)
1355
+ norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
1356
+ hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
1357
+ else:
1358
+ hidden_states = outputs[0]
1359
+
1360
+ hidden_states = self.projector(hidden_states)
1361
+ if attention_mask is None:
1362
+ pooled_output = hidden_states.mean(dim=1)
1363
+ else:
1364
+ padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
1365
+ hidden_states[~padding_mask] = 0.0
1366
+ pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
1367
+
1368
+ logits = self.classifier(pooled_output)
1369
+
1370
+ loss = None
1371
+ if labels is not None:
1372
+ loss_fct = CrossEntropyLoss()
1373
+ loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
1374
+
1375
+ if not return_dict:
1376
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
1377
+ return ((loss,) + output) if loss is not None else output
1378
+
1379
+ return SequenceClassifierOutput(
1380
+ loss=loss,
1381
+ logits=logits,
1382
+ hidden_states=outputs.hidden_states,
1383
+ attentions=outputs.attentions,
1384
+ )
1385
+
1386
+
1387
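# A small sketch of the two pooling steps used by the classification head above: the learned
# softmax-weighted sum over per-layer hidden states, and the padding-aware mean over time. The
# padding mask is hard-coded for illustration, and masked_fill replaces the in-place assignment
# with the same effect.
import torch
import torch.nn as nn

num_layers, batch, time, hidden = 3, 2, 5, 4
layer_outputs = [torch.randn(batch, time, hidden) for _ in range(num_layers)]
layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)

stacked = torch.stack(layer_outputs, dim=1)                        # (batch, num_layers, time, hidden)
norm_weights = nn.functional.softmax(layer_weights, dim=-1)
hidden_states = (stacked * norm_weights.view(-1, 1, 1)).sum(dim=1)

padding_mask = torch.tensor([[1, 1, 1, 0, 0],
                             [1, 1, 1, 1, 1]]).bool()
hidden_states = hidden_states.masked_fill(~padding_mask.unsqueeze(-1), 0.0)
pooled = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
print(pooled.shape)  # torch.Size([2, 4])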
+ @add_start_docstrings(
1388
+ """
1389
+ Wav2Vec2Bert Model with a frame classification head on top for tasks like Speaker Diarization.
1390
+ """,
1391
+ WAV2VEC2_BERT_START_DOCSTRING,
1392
+ )
1393
+ class Wav2Vec2BertForAudioFrameClassification(Wav2Vec2BertPreTrainedModel):
1394
+ # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForAudioFrameClassification.__init__ with Wav2Vec2Conformer->Wav2Vec2Bert,WAV2VEC2_CONFORMER->WAV2VEC2_BERT,wav2vec2_conformer->wav2vec2_bert
1395
+ def __init__(self, config):
1396
+ super().__init__(config)
1397
+
1398
+ if hasattr(config, "add_adapter") and config.add_adapter:
1399
+ raise ValueError(
1400
+ "Audio frame classification does not support the use of Wav2Vec2Bert adapters (config.add_adapter=True)"
1401
+ )
1402
+ self.wav2vec2_bert = Wav2Vec2BertModel(config)
1403
+ num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
1404
+ if config.use_weighted_layer_sum:
1405
+ self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
1406
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1407
+ self.num_labels = config.num_labels
1408
+
1409
+ self.init_weights()
1410
+
1411
+ # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForAudioFrameClassification.freeze_base_model with wav2vec2_conformer->wav2vec2_bert
1412
+ def freeze_base_model(self):
1413
+ """
1414
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
1415
+ be updated during training. Only the classification head will be updated.
1416
+ """
1417
+ for param in self.wav2vec2_bert.parameters():
1418
+ param.requires_grad = False
1419
+
1420
+ @add_start_docstrings_to_model_forward(WAV2VEC2_BERT_INPUTS_DOCSTRING)
1421
+ @add_code_sample_docstrings(
1422
+ checkpoint=_BASE_CHECKPOINT_FOR_DOC,
1423
+ output_type=TokenClassifierOutput,
1424
+ config_class=_CONFIG_FOR_DOC,
1425
+ modality="audio",
1426
+ )
1427
+ # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForAudioFrameClassification.forward with wav2vec2_conformer->wav2vec2_bert, input_values->input_features
1428
+ def forward(
1429
+ self,
1430
+ input_features: Optional[torch.Tensor],
1431
+ attention_mask: Optional[torch.Tensor] = None,
1432
+ labels: Optional[torch.Tensor] = None,
1433
+ output_attentions: Optional[bool] = None,
1434
+ output_hidden_states: Optional[bool] = None,
1435
+ return_dict: Optional[bool] = None,
1436
+ ) -> Union[Tuple, TokenClassifierOutput]:
1437
+ r"""
1438
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1439
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1440
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1441
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1442
+ """
1443
+
1444
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1445
+ output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
1446
+
1447
+ outputs = self.wav2vec2_bert(
1448
+ input_features,
1449
+ attention_mask=attention_mask,
1450
+ output_attentions=output_attentions,
1451
+ output_hidden_states=output_hidden_states,
1452
+ return_dict=return_dict,
1453
+ )
1454
+
1455
+ if self.config.use_weighted_layer_sum:
1456
+ hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
1457
+ hidden_states = torch.stack(hidden_states, dim=1)
1458
+ norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
1459
+ hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
1460
+ else:
1461
+ hidden_states = outputs[0]
1462
+
1463
+ logits = self.classifier(hidden_states)
1464
+
1465
+ loss = None
1466
+ if labels is not None:
1467
+ loss_fct = CrossEntropyLoss()
1468
+ loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1))
1469
+
1470
+ if not return_dict:
1471
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
1472
+ return output
1473
+
1474
+ return TokenClassifierOutput(
1475
+ loss=loss,
1476
+ logits=logits,
1477
+ hidden_states=outputs.hidden_states,
1478
+ attentions=outputs.attentions,
1479
+ )
1480
+
1481
+
1482
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.AMSoftmaxLoss
1483
+ class AMSoftmaxLoss(nn.Module):
1484
+ def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4):
1485
+ super().__init__()
1486
+ self.scale = scale
1487
+ self.margin = margin
1488
+ self.num_labels = num_labels
1489
+ self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True)
1490
+ self.loss = nn.CrossEntropyLoss()
1491
+
1492
+ def forward(self, hidden_states, labels):
1493
+ labels = labels.flatten()
1494
+ weight = nn.functional.normalize(self.weight, dim=0)
1495
+ hidden_states = nn.functional.normalize(hidden_states, dim=1)
1496
+ cos_theta = torch.mm(hidden_states, weight)
1497
+ psi = cos_theta - self.margin
1498
+
1499
+ onehot = nn.functional.one_hot(labels, self.num_labels)
1500
+ logits = self.scale * torch.where(onehot.bool(), psi, cos_theta)
1501
+ loss = self.loss(logits, labels)
1502
+
1503
+ return loss
1504
+
1505
+
1506
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.TDNNLayer
1507
+ class TDNNLayer(nn.Module):
1508
+ def __init__(self, config, layer_id=0):
1509
+ super().__init__()
1510
+ self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]
1511
+ self.out_conv_dim = config.tdnn_dim[layer_id]
1512
+ self.kernel_size = config.tdnn_kernel[layer_id]
1513
+ self.dilation = config.tdnn_dilation[layer_id]
1514
+
1515
+ self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)
1516
+ self.activation = nn.ReLU()
1517
+
1518
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
1519
+ if is_peft_available():
1520
+ from peft.tuners.lora import LoraLayer
1521
+
1522
+ if isinstance(self.kernel, LoraLayer):
1523
+ warnings.warn(
1524
+ "Detected LoRA on TDNNLayer. LoRA weights won't be applied due to optimization. "
1525
+ "You should exclude TDNNLayer from LoRA's target modules.",
1526
+ )
1527
+
1528
+ # for backward compatibility, we keep nn.Linear but call F.conv1d for speed up
1529
+ hidden_states = hidden_states.transpose(1, 2)
1530
+ weight = self.kernel.weight.view(self.out_conv_dim, self.kernel_size, self.in_conv_dim).transpose(1, 2)
1531
+ hidden_states = nn.functional.conv1d(hidden_states, weight, self.kernel.bias, dilation=self.dilation)
1532
+ hidden_states = hidden_states.transpose(1, 2)
1533
+
1534
+ hidden_states = self.activation(hidden_states)
1535
+ return hidden_states
1536
+
1537
+
1538
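# A sketch verifying the comment in TDNNLayer.forward: reshaping the nn.Linear weight into a Conv1d
# kernel reproduces "gather kernel_size consecutive frames, then apply the linear layer"
# (dilation 1 shown for simplicity; sizes are toy values).
import torch
import torch.nn as nn

in_dim, out_dim, kernel_size, time = 4, 3, 3, 10
linear = nn.Linear(in_dim * kernel_size, out_dim)
x = torch.randn(2, time, in_dim)

# conv path, as in TDNNLayer.forward
weight = linear.weight.view(out_dim, kernel_size, in_dim).transpose(1, 2)
y_conv = nn.functional.conv1d(x.transpose(1, 2), weight, linear.bias).transpose(1, 2)

# reference path: unfold kernel_size consecutive frames and apply the linear layer
windows = x.unfold(1, kernel_size, 1)                    # (2, time - k + 1, in_dim, k)
windows = windows.transpose(2, 3).reshape(2, time - kernel_size + 1, kernel_size * in_dim)
y_ref = linear(windows)

print(torch.allclose(y_conv, y_ref, atol=1e-5))          # True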
+ @add_start_docstrings(
1539
+ """
1540
+ Wav2Vec2Bert Model with an XVector feature extraction head on top for tasks like Speaker Verification.
1541
+ """,
1542
+ WAV2VEC2_BERT_START_DOCSTRING,
1543
+ )
1544
+ class Wav2Vec2BertForXVector(Wav2Vec2BertPreTrainedModel):
1545
+ # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForXVector.__init__ with Wav2Vec2Conformer->Wav2Vec2Bert,WAV2VEC2_CONFORMER->WAV2VEC2_BERT,wav2vec2_conformer->wav2vec2_bert
1546
+ def __init__(self, config):
1547
+ super().__init__(config)
1548
+
1549
+ self.wav2vec2_bert = Wav2Vec2BertModel(config)
1550
+ num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
1551
+ if config.use_weighted_layer_sum:
1552
+ self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
1553
+ self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0])
1554
+
1555
+ tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))]
1556
+ self.tdnn = nn.ModuleList(tdnn_layers)
1557
+
1558
+ self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim)
1559
+ self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim)
1560
+
1561
+ self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels)
1562
+
1563
+ self.init_weights()
1564
+
1565
+ # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForXVector.freeze_base_model with wav2vec2_conformer->wav2vec2_bert
1566
+ def freeze_base_model(self):
1567
+ """
1568
+ Calling this function will disable the gradient computation for the base model so that its parameters will not
1569
+ be updated during training. Only the classification head will be updated.
1570
+ """
1571
+ for param in self.wav2vec2_bert.parameters():
1572
+ param.requires_grad = False
1573
+
1574
+ # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForXVector._get_tdnn_output_lengths
1575
+ def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
1576
+ """
1577
+ Computes the output length of the TDNN layers
1578
+ """
1579
+
1580
+ def _conv_out_length(input_length, kernel_size, stride):
1581
+ # 1D convolutional layer output length formula taken
1582
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
1583
+ return (input_length - kernel_size) // stride + 1
1584
+
1585
+ for kernel_size in self.config.tdnn_kernel:
1586
+ input_lengths = _conv_out_length(input_lengths, kernel_size, 1)
1587
+
1588
+ return input_lengths
1589
+
1590
+ @add_start_docstrings_to_model_forward(WAV2VEC2_BERT_INPUTS_DOCSTRING)
1591
+ @add_code_sample_docstrings(
1592
+ checkpoint=_BASE_CHECKPOINT_FOR_DOC,
1593
+ output_type=XVectorOutput,
1594
+ config_class=_CONFIG_FOR_DOC,
1595
+ modality="audio",
1596
+ )
1597
+ # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerForXVector.forward with wav2vec2_conformer->wav2vec2_bert, input_values->input_features
1598
+ def forward(
1599
+ self,
1600
+ input_features: Optional[torch.Tensor],
1601
+ attention_mask: Optional[torch.Tensor] = None,
1602
+ output_attentions: Optional[bool] = None,
1603
+ output_hidden_states: Optional[bool] = None,
1604
+ return_dict: Optional[bool] = None,
1605
+ labels: Optional[torch.Tensor] = None,
1606
+ ) -> Union[Tuple, XVectorOutput]:
1607
+ r"""
1608
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1609
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1610
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1611
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1612
+ """
1613
+
1614
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1615
+ output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
1616
+
1617
+ outputs = self.wav2vec2_bert(
1618
+ input_features,
1619
+ attention_mask=attention_mask,
1620
+ output_attentions=output_attentions,
1621
+ output_hidden_states=output_hidden_states,
1622
+ return_dict=return_dict,
1623
+ )
1624
+
1625
+ if self.config.use_weighted_layer_sum:
1626
+ hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
1627
+ hidden_states = torch.stack(hidden_states, dim=1)
1628
+ norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
1629
+ hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
1630
+ else:
1631
+ hidden_states = outputs[0]
1632
+
1633
+ hidden_states = self.projector(hidden_states)
1634
+
1635
+ for tdnn_layer in self.tdnn:
1636
+ hidden_states = tdnn_layer(hidden_states)
1637
+
1638
+ # Statistic Pooling
1639
+ if attention_mask is None:
1640
+ mean_features = hidden_states.mean(dim=1)
1641
+ std_features = hidden_states.std(dim=1)
1642
+ else:
1643
+ feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1))
1644
+ tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths)
1645
+ mean_features = []
1646
+ std_features = []
1647
+ for i, length in enumerate(tdnn_output_lengths):
1648
+ mean_features.append(hidden_states[i, :length].mean(dim=0))
1649
+ std_features.append(hidden_states[i, :length].std(dim=0))
1650
+ mean_features = torch.stack(mean_features)
1651
+ std_features = torch.stack(std_features)
1652
+ statistic_pooling = torch.cat([mean_features, std_features], dim=-1)
1653
+
1654
+ output_embeddings = self.feature_extractor(statistic_pooling)
1655
+ logits = self.classifier(output_embeddings)
1656
+
1657
+ loss = None
1658
+ if labels is not None:
1659
+ loss = self.objective(logits, labels)
1660
+
1661
+ if not return_dict:
1662
+ output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:]
1663
+ return ((loss,) + output) if loss is not None else output
1664
+
1665
+ return XVectorOutput(
1666
+ loss=loss,
1667
+ logits=logits,
1668
+ embeddings=output_embeddings,
1669
+ hidden_states=outputs.hidden_states,
1670
+ attentions=outputs.attentions,
1671
+ )