applied-ai-018 committed
Commit ae23c25 · verified · 1 Parent(s): 7da0820

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__init__.py +97 -0
  2. env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/configuration_convnextv2.py +118 -0
  3. env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/convert_convnextv2_to_pytorch.py +286 -0
  4. env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_convnextv2.py +576 -0
  5. env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_tf_convnextv2.py +686 -0
  6. env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/configuration_distilbert.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_distilbert.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_flax_distilbert.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_tf_distilbert.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert_fast.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/configuration_ernie.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__init__.py +43 -0
  13. env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/__init__.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/convert_megatron_to_pytorch.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/tokenization_gpt_sw3.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/convert_megatron_to_pytorch.py +197 -0
  17. env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/tokenization_gpt_sw3.py +342 -0
  18. env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__init__.py +70 -0
  19. env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/tokenization_gptsan_japanese.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py +159 -0
  21. env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/convert_gptsan_tf_checkpoint_to_pytorch.py +181 -0
  22. env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py +1345 -0
  23. env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/tokenization_gptsan_japanese.py +541 -0
  24. env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/__init__.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/configuration_groupvit.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/convert_groupvit_nvlab_to_hf.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/modeling_groupvit.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/modeling_tf_groupvit.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/modeling_groupvit.py +1586 -0
  30. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__init__.py +145 -0
  31. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/__init__.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/configuration_mobilebert.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/convert_mobilebert_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/modeling_mobilebert.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/modeling_tf_mobilebert.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/tokenization_mobilebert.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/tokenization_mobilebert_fast.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/configuration_mobilebert.py +188 -0
  39. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py +58 -0
  40. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/modeling_mobilebert.py +1617 -0
  41. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/modeling_tf_mobilebert.py +1972 -0
  42. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/tokenization_mobilebert.py +518 -0
  43. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/tokenization_mobilebert_fast.py +189 -0
  44. env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__init__.py +96 -0
  45. env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/__init__.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/configuration_perceiver.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/convert_perceiver_haiku_to_pytorch.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/feature_extraction_perceiver.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/image_processing_perceiver.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__init__.py ADDED
@@ -0,0 +1,97 @@
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.

# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
    is_tf_available,
)


_import_structure = {
    "configuration_convnextv2": [
        "CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConvNextV2Config",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnextv2"] = [
        "CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextV2ForImageClassification",
        "ConvNextV2Model",
        "ConvNextV2PreTrainedModel",
        "ConvNextV2Backbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnextv2"] = [
        "TFConvNextV2ForImageClassification",
        "TFConvNextV2Model",
        "TFConvNextV2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnextv2 import (
        CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConvNextV2Config,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnextv2 import (
            CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextV2Backbone,
            ConvNextV2ForImageClassification,
            ConvNextV2Model,
            ConvNextV2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnextv2 import (
            TFConvNextV2ForImageClassification,
            TFConvNextV2Model,
            TFConvNextV2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
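
The `_LazyModule` registration above means the public names are only materialized when first accessed, so importing `transformers` stays cheap even when torch or TensorFlow is installed. A minimal usage sketch of the resulting import surface (assumes a PyTorch install):

```python
# Sketch: these names resolve lazily through _LazyModule at first access.
from transformers import ConvNextV2Config, ConvNextV2Model

config = ConvNextV2Config()       # triggers import of configuration_convnextv2
model = ConvNextV2Model(config)   # triggers import of modeling_convnextv2 (requires torch)
```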
env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/configuration_convnextv2.py ADDED
@@ -0,0 +1,118 @@
# coding=utf-8
# Copyright 2023 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ConvNeXTV2 model configuration"""


from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ConvNextV2Model`]. It is used to instantiate a
    ConvNeXTV2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the ConvNeXTV2
    [facebook/convnextv2-tiny-1k-224](https://huggingface.co/facebook/convnextv2-tiny-1k-224) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        patch_size (`int`, *optional*, defaults to 4):
            Patch size to use in the patch embedding layer.
        num_stages (`int`, *optional*, defaults to 4):
            The number of stages in the model.
        hidden_sizes (`List[int]`, *optional*, defaults to `[96, 192, 384, 768]`):
            Dimensionality (hidden size) at each stage.
        depths (`List[int]`, *optional*, defaults to `[3, 3, 9, 3]`):
            Depth (number of blocks) for each stage.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"gelu_new"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            The drop rate for stochastic depth.
        out_features (`List[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`List[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.

    Example:
    ```python
    >>> from transformers import ConvNextV2Config, ConvNextV2Model

    >>> # Initializing a ConvNeXTV2 convnextv2-tiny-1k-224 style configuration
    >>> configuration = ConvNextV2Config()

    >>> # Initializing a model (with random weights) from the convnextv2-tiny-1k-224 style configuration
    >>> model = ConvNextV2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
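
As a quick illustration of the `out_features`/`out_indices` arguments documented above, here is a hedged sketch of building a smaller variant; the depths and hidden sizes mirror the "atto" entry used in the conversion script below, and the printed values are what the alignment helper is expected to derive:

```python
from transformers import ConvNextV2Config

# Sketch: a smaller ConvNeXT V2 variant ("atto"-sized, per the conversion script
# further down in this commit), configured as a backbone.
config = ConvNextV2Config(
    depths=[2, 2, 6, 2],
    hidden_sizes=[40, 80, 160, 320],
    out_features=["stage2", "stage4"],  # out_indices is derived automatically
)
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_indices)  # aligned with out_features by get_aligned_output_features_output_indices
```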
env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/convert_convnextv2_to_pytorch.py ADDED
@@ -0,0 +1,286 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ConvNeXTV2 checkpoints from the original repository.

URL: https://github.com/facebookresearch/ConvNeXt"""

import argparse
import json
import os

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ConvNextImageProcessor, ConvNextV2Config, ConvNextV2ForImageClassification
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_convnextv2_config(checkpoint_url):
    config = ConvNextV2Config()

    if "atto" in checkpoint_url:
        depths = [2, 2, 6, 2]
        hidden_sizes = [40, 80, 160, 320]
    if "femto" in checkpoint_url:
        depths = [2, 2, 6, 2]
        hidden_sizes = [48, 96, 192, 384]
    if "pico" in checkpoint_url:
        depths = [2, 2, 6, 2]
        hidden_sizes = [64, 128, 256, 512]
    if "nano" in checkpoint_url:
        depths = [2, 2, 8, 2]
        hidden_sizes = [80, 160, 320, 640]
    if "tiny" in checkpoint_url:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
    if "large" in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
    if "huge" in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [352, 704, 1408, 2816]

    num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    repo_id = "huggingface/label-files"
    config.num_labels = num_labels
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.hidden_sizes = hidden_sizes
    config.depths = depths

    return config, expected_shape


def rename_key(name):
    if "downsample_layers.0.0" in name:
        name = name.replace("downsample_layers.0.0", "embeddings.patch_embeddings")
    if "downsample_layers.0.1" in name:
        name = name.replace("downsample_layers.0.1", "embeddings.norm")  # we rename to layernorm later on
    if "downsample_layers.1.0" in name:
        name = name.replace("downsample_layers.1.0", "stages.1.downsampling_layer.0")
    if "downsample_layers.1.1" in name:
        name = name.replace("downsample_layers.1.1", "stages.1.downsampling_layer.1")
    if "downsample_layers.2.0" in name:
        name = name.replace("downsample_layers.2.0", "stages.2.downsampling_layer.0")
    if "downsample_layers.2.1" in name:
        name = name.replace("downsample_layers.2.1", "stages.2.downsampling_layer.1")
    if "downsample_layers.3.0" in name:
        name = name.replace("downsample_layers.3.0", "stages.3.downsampling_layer.0")
    if "downsample_layers.3.1" in name:
        name = name.replace("downsample_layers.3.1", "stages.3.downsampling_layer.1")
    if "stages" in name and "downsampling_layer" not in name:
        # stages.0.0. for instance should be renamed to stages.0.layers.0.
        name = name[: len("stages.0")] + ".layers" + name[len("stages.0") :]
    if "gamma" in name:
        name = name.replace("gamma", "weight")
    if "beta" in name:
        name = name.replace("beta", "bias")
    if "stages" in name:
        name = name.replace("stages", "encoder.stages")
    if "norm" in name:
        name = name.replace("norm", "layernorm")
    if "head" in name:
        name = name.replace("head", "classifier")

    return name


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_preprocessor(checkpoint_url):
    if "224" in checkpoint_url:
        size = 224
        crop_pct = 224 / 256
    elif "384" in checkpoint_url:
        size = 384
        crop_pct = None
    else:
        size = 512
        crop_pct = None

    return ConvNextImageProcessor(
        size=size,
        crop_pct=crop_pct,
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.229, 0.224, 0.225],
        resample=PILImageResampling.BICUBIC,
    )


@torch.no_grad()
def convert_convnextv2_checkpoint(checkpoint_url, pytorch_dump_folder_path, save_model, push_to_hub):
    """
    Copy/paste/tweak model's weights to our ConvNeXTV2 structure.
    """
    print("Downloading original model from checkpoint...")
    # define ConvNeXTV2 configuration based on URL
    config, expected_shape = get_convnextv2_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    print("Converting model parameters...")
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # add prefix to all keys expect classifier head
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if not key.startswith("classifier"):
            key = "convnextv2." + key
        state_dict[key] = val

    # load HuggingFace model
    model = ConvNextV2ForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image, prepared by ConvNextImageProcessor
    preprocessor = convert_preprocessor(checkpoint_url)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")
    logits = model(**inputs).logits

    # note: the logits below were obtained without center cropping
    if checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt":
        expected_logits = torch.tensor([-0.3930, 0.1747, -0.5246, 0.4177, 0.4295])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_femto_1k_224_ema.pt":
        expected_logits = torch.tensor([-0.1727, -0.5341, -0.7818, -0.4745, -0.6566])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_pico_1k_224_ema.pt":
        expected_logits = torch.tensor([-0.0333, 0.1563, -0.9137, 0.1054, 0.0381])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_nano_1k_224_ema.pt":
        expected_logits = torch.tensor([-0.1744, -0.1555, -0.0713, 0.0950, -0.1431])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_tiny_1k_224_ema.pt":
        expected_logits = torch.tensor([0.9996, 0.1966, -0.4386, -0.3472, 0.6661])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_base_1k_224_ema.pt":
        expected_logits = torch.tensor([-0.2553, -0.6708, -0.1359, 0.2518, -0.2488])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_large_1k_224_ema.pt":
        expected_logits = torch.tensor([-0.0673, -0.5627, -0.3753, -0.2722, 0.0178])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_huge_1k_224_ema.pt":
        expected_logits = torch.tensor([-0.6377, -0.7458, -0.2150, 0.1184, -0.0597])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_224_ema.pt":
        expected_logits = torch.tensor([1.0799, 0.2322, -0.8860, 1.0219, 0.6231])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_384_ema.pt":
        expected_logits = torch.tensor([0.3766, 0.4917, -1.1426, 0.9942, 0.6024])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_224_ema.pt":
        expected_logits = torch.tensor([0.4220, -0.6919, -0.4317, -0.2881, -0.6609])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_384_ema.pt":
        expected_logits = torch.tensor([0.1082, -0.8286, -0.5095, 0.4681, -0.8085])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_224_ema.pt":
        expected_logits = torch.tensor([-0.2419, -0.6221, 0.2176, -0.0980, -0.7527])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_384_ema.pt":
        expected_logits = torch.tensor([0.0391, -0.4371, 0.3786, 0.1251, -0.2784])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_224_ema.pt":
        expected_logits = torch.tensor([-0.0504, 0.5636, -0.1729, -0.6507, -0.3949])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_384_ema.pt":
        expected_logits = torch.tensor([0.3560, 0.9486, 0.3149, -0.2667, -0.5138])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_384_ema.pt":
        expected_logits = torch.tensor([-0.2469, -0.4550, -0.5853, -0.0810, 0.0309])
    elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_512_ema.pt":
        expected_logits = torch.tensor([-0.3090, 0.0802, -0.0682, -0.1979, -0.2826])
    else:
        raise ValueError(f"Unknown URL: {checkpoint_url}")

    assert torch.allclose(logits[0, :5], expected_logits, atol=1e-3)
    assert logits.shape == expected_shape
    print("Model outputs match the original results!")

    if save_model:
        print("Saving model to local...")
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)

        model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    model_name = "convnextv2"
    if "atto" in checkpoint_url:
        model_name += "-atto"
    if "femto" in checkpoint_url:
        model_name += "-femto"
    if "pico" in checkpoint_url:
        model_name += "-pico"
    if "nano" in checkpoint_url:
        model_name += "-nano"
    elif "tiny" in checkpoint_url:
        model_name += "-tiny"
    elif "base" in checkpoint_url:
        model_name += "-base"
    elif "large" in checkpoint_url:
        model_name += "-large"
    elif "huge" in checkpoint_url:
        model_name += "-huge"
    if "22k" in checkpoint_url and "1k" not in checkpoint_url:
        model_name += "-22k"
    elif "22k" in checkpoint_url and "1k" in checkpoint_url:
        model_name += "-22k-1k"
    elif "1k" in checkpoint_url:
        model_name += "-1k"
    if "224" in checkpoint_url:
        model_name += "-224"
    elif "384" in checkpoint_url:
        model_name += "-384"
    elif "512" in checkpoint_url:
        model_name += "-512"

    if push_to_hub:
        print(f"Pushing {model_name} to the hub...")
        model.push_to_hub(model_name)
        preprocessor.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt",
        type=str,
        help="URL of the original ConvNeXTV2 checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image preprocessor to the hub")

    args = parser.parse_args()
    convert_convnextv2_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub
    )
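
The script is normally driven by the argparse flags defined in its `__main__` block. As an illustrative sketch only, the conversion entry point can also be called directly from Python; the values below simply mirror the parser defaults, and the output path is a placeholder:

```python
# Illustrative only: mirrors what the __main__ block does with its defaults.
from transformers.models.convnextv2.convert_convnextv2_to_pytorch import convert_convnextv2_checkpoint

convert_convnextv2_checkpoint(
    checkpoint_url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt",
    pytorch_dump_folder_path="model",  # placeholder output directory
    save_model=True,
    push_to_hub=False,
)
```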
env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_convnextv2.py ADDED
@@ -0,0 +1,576 @@
# coding=utf-8
# Copyright 2023 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch ConvNextV2 model."""


from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_convnextv2 import ConvNextV2Config


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ConvNextV2Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/convnextv2-tiny-1k-224"
_EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/convnextv2-tiny-1k-224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/convnextv2-tiny-1k-224",
    # See all ConvNextV2 models at https://huggingface.co/models?filter=convnextv2
]


# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->ConvNextV2
class ConvNextV2DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class ConvNextV2GRN(nn.Module):
    """GRN (Global Response Normalization) layer"""

    def __init__(self, dim: int):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(1, 1, 1, dim))
        self.bias = nn.Parameter(torch.zeros(1, 1, 1, dim))

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        # Compute and normalize global spatial feature maps
        global_features = torch.norm(hidden_states, p=2, dim=(1, 2), keepdim=True)
        norm_features = global_features / (global_features.mean(dim=-1, keepdim=True) + 1e-6)
        hidden_states = self.weight * (hidden_states * norm_features) + self.bias + hidden_states

        return hidden_states


# Copied from transformers.models.convnext.modeling_convnext.ConvNextLayerNorm with ConvNext->ConvNextV2
class ConvNextV2LayerNorm(nn.Module):
    r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
    The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
    width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
    """

    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        if self.data_format not in ["channels_last", "channels_first"]:
            raise NotImplementedError(f"Unsupported data format: {self.data_format}")
        self.normalized_shape = (normalized_shape,)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.data_format == "channels_last":
            x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        elif self.data_format == "channels_first":
            input_dtype = x.dtype
            x = x.float()
            u = x.mean(1, keepdim=True)
            s = (x - u).pow(2).mean(1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.eps)
            x = x.to(dtype=input_dtype)
            x = self.weight[:, None, None] * x + self.bias[:, None, None]
        return x


# Copied from transformers.models.convnext.modeling_convnext.ConvNextEmbeddings with ConvNext->ConvNextV2
class ConvNextV2Embeddings(nn.Module):
    """This class is comparable to (and inspired by) the SwinEmbeddings class
    found in src/transformers/models/swin/modeling_swin.py.
    """

    def __init__(self, config):
        super().__init__()
        self.patch_embeddings = nn.Conv2d(
            config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size
        )
        self.layernorm = ConvNextV2LayerNorm(config.hidden_sizes[0], eps=1e-6, data_format="channels_first")
        self.num_channels = config.num_channels

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embeddings = self.patch_embeddings(pixel_values)
        embeddings = self.layernorm(embeddings)
        return embeddings


class ConvNextV2Layer(nn.Module):
    """This corresponds to the `Block` class in the original implementation.

    There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), Conv, GELU, 1x1 Conv]; all in (N, C,
    H, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back

    The authors used (2) as they find it slightly faster in PyTorch.

    Args:
        config ([`ConvNextV2Config`]): Model configuration class.
        dim (`int`): Number of input channels.
        drop_path (`float`): Stochastic depth rate. Default: 0.0.
    """

    def __init__(self, config, dim, drop_path=0):
        super().__init__()
        # depthwise conv
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
        self.layernorm = ConvNextV2LayerNorm(dim, eps=1e-6)
        # pointwise/1x1 convs, implemented with linear layers
        self.pwconv1 = nn.Linear(dim, 4 * dim)
        self.act = ACT2FN[config.hidden_act]
        self.grn = ConvNextV2GRN(4 * dim)
        self.pwconv2 = nn.Linear(4 * dim, dim)
        self.drop_path = ConvNextV2DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        input = hidden_states
        x = self.dwconv(hidden_states)
        # (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels)
        x = x.permute(0, 2, 3, 1)
        x = self.layernorm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.grn(x)
        x = self.pwconv2(x)
        # (batch_size, height, width, num_channels) -> (batch_size, num_channels, height, width)
        x = x.permute(0, 3, 1, 2)

        x = input + self.drop_path(x)
        return x


# Copied from transformers.models.convnext.modeling_convnext.ConvNextStage with ConvNeXT->ConvNeXTV2, ConvNext->ConvNextV2
class ConvNextV2Stage(nn.Module):
    """ConvNeXTV2 stage, consisting of an optional downsampling layer + multiple residual blocks.

    Args:
        config ([`ConvNextV2Config`]): Model configuration class.
        in_channels (`int`): Number of input channels.
        out_channels (`int`): Number of output channels.
        depth (`int`): Number of residual blocks.
        drop_path_rates(`List[float]`): Stochastic depth rates for each layer.
    """

    def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):
        super().__init__()

        if in_channels != out_channels or stride > 1:
            self.downsampling_layer = nn.Sequential(
                ConvNextV2LayerNorm(in_channels, eps=1e-6, data_format="channels_first"),
                nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride),
            )
        else:
            self.downsampling_layer = nn.Identity()
        drop_path_rates = drop_path_rates or [0.0] * depth
        self.layers = nn.Sequential(
            *[ConvNextV2Layer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)]
        )

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        hidden_states = self.downsampling_layer(hidden_states)
        hidden_states = self.layers(hidden_states)
        return hidden_states


# Copied from transformers.models.convnext.modeling_convnext.ConvNextEncoder with ConvNext->ConvNextV2
class ConvNextV2Encoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList()
        drop_path_rates = [
            x.tolist() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths)).split(config.depths)
        ]
        prev_chs = config.hidden_sizes[0]
        for i in range(config.num_stages):
            out_chs = config.hidden_sizes[i]
            stage = ConvNextV2Stage(
                config,
                in_channels=prev_chs,
                out_channels=out_chs,
                stride=2 if i > 0 else 1,
                depth=config.depths[i],
                drop_path_rates=drop_path_rates[i],
            )
            self.stages.append(stage)
            prev_chs = out_chs

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.stages):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            hidden_states = layer_module(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
        )


# Copied from transformers.models.convnext.modeling_convnext.ConvNextPreTrainedModel with ConvNext->ConvNextV2, convnext->convnextv2
class ConvNextV2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ConvNextV2Config
    base_model_prefix = "convnextv2"
    main_input_name = "pixel_values"

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


CONVNEXTV2_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ConvNextV2Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

CONVNEXTV2_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`ConvNextImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ConvNextV2 model outputting raw features without any specific head on top.",
    CONVNEXTV2_START_DOCSTRING,
)
# Copied from transformers.models.convnext.modeling_convnext.ConvNextModel with CONVNEXT->CONVNEXTV2, ConvNext->ConvNextV2
class ConvNextV2Model(ConvNextV2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = ConvNextV2Embeddings(config)
        self.encoder = ConvNextV2Encoder(config)

        # final layernorm layer
        self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: torch.FloatTensor = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output = self.embeddings(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]

        # global average pooling, (N, C, H, W) -> (N, C)
        pooled_output = self.layernorm(last_hidden_state.mean([-2, -1]))

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    CONVNEXTV2_START_DOCSTRING,
)
# Copied from transformers.models.convnext.modeling_convnext.ConvNextForImageClassification with CONVNEXT->CONVNEXTV2,ConvNext->ConvNextV2,convnext->convnextv2
class ConvNextV2ForImageClassification(ConvNextV2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.convnextv2 = ConvNextV2Model(config)

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: torch.FloatTensor = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.convnextv2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ConvNeXT V2 backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    CONVNEXTV2_START_DOCSTRING,
)
# Copied from transformers.models.convnext.modeling_convnext.ConvNextBackbone with CONVNEXT->CONVNEXTV2,ConvNext->ConvNextV2,facebook/convnext-tiny-224->facebook/convnextv2-tiny-1k-224
class ConvNextV2Backbone(ConvNextV2PreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.embeddings = ConvNextV2Embeddings(config)
        self.encoder = ConvNextV2Encoder(config)
        self.num_features = [config.hidden_sizes[0]] + config.hidden_sizes

        # Add layer norms to hidden states of out_features
        hidden_states_norms = {}
        for stage, num_channels in zip(self._out_features, self.channels):
            hidden_states_norms[stage] = ConvNextV2LayerNorm(num_channels, data_format="channels_first")
        self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BackboneOutput:
        """
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, AutoBackbone
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
        >>> model = AutoBackbone.from_pretrained("facebook/convnextv2-tiny-1k-224")

        >>> inputs = processor(image, return_tensors="pt")
        >>> outputs = model(**inputs)
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embeddings(pixel_values)

        outputs = self.encoder(
            embedding_output,
            output_hidden_states=True,
            return_dict=return_dict,
        )

        hidden_states = outputs.hidden_states if return_dict else outputs[1]

        feature_maps = ()
        for stage, hidden_state in zip(self.stage_names, hidden_states):
            if stage in self.out_features:
                hidden_state = self.hidden_states_norms[stage](hidden_state)
                feature_maps += (hidden_state,)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=hidden_states if output_hidden_states else None,
            attentions=None,
        )
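
For completeness, a minimal inference sketch for the classification head defined above, built from the checkpoint and expected label already named in the file's docstring constants (`_IMAGE_CLASS_CHECKPOINT`, `_IMAGE_CLASS_EXPECTED_OUTPUT`):

```python
# Minimal sketch: classify the COCO cats image used throughout these files.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")

with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits

# Per _IMAGE_CLASS_EXPECTED_OUTPUT, this should print "tabby, tabby cat".
print(model.config.id2label[logits.argmax(-1).item()])
```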
env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_tf_convnextv2.py ADDED
@@ -0,0 +1,686 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Meta Platforms Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 ConvNextV2 model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import tensorflow as tf
24
+
25
+ from ...activations_tf import get_tf_activation
26
+ from ...modeling_tf_outputs import (
27
+ TFBaseModelOutputWithNoAttention,
28
+ TFBaseModelOutputWithPooling,
29
+ TFBaseModelOutputWithPoolingAndNoAttention,
30
+ TFImageClassifierOutputWithNoAttention,
31
+ )
32
+ from ...modeling_tf_utils import (
33
+ TFModelInputType,
34
+ TFPreTrainedModel,
35
+ TFSequenceClassificationLoss,
36
+ get_initializer,
37
+ keras,
38
+ keras_serializable,
39
+ unpack_inputs,
40
+ )
41
+ from ...tf_utils import shape_list
42
+ from ...utils import (
43
+ add_code_sample_docstrings,
44
+ add_start_docstrings,
45
+ add_start_docstrings_to_model_forward,
46
+ logging,
47
+ )
48
+ from .configuration_convnextv2 import ConvNextV2Config
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ # General docstring
54
+ _CONFIG_FOR_DOC = "ConvNextV2Config"
55
+
56
+ # Base docstring
57
+ _CHECKPOINT_FOR_DOC = "facebook/convnextv2-tiny-1k-224"
58
+ _EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]
59
+
60
+ # Image classification docstring
61
+ _IMAGE_CLASS_CHECKPOINT = "facebook/convnextv2-tiny-1k-224"
62
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
63
+
64
+ CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST = [
65
+ "facebook/convnextv2-tiny-1k-224",
66
+ # See all ConvNextV2 models at https://huggingface.co/models?filter=convnextv2
67
+ ]
68
+
69
+
70
+ # Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextDropPath with ConvNext->ConvNextV2
71
+ class TFConvNextV2DropPath(keras.layers.Layer):
72
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
73
+ References:
74
+ (1) github.com:rwightman/pytorch-image-models
75
+ """
76
+
77
+ def __init__(self, drop_path: float, **kwargs):
78
+ super().__init__(**kwargs)
79
+ self.drop_path = drop_path
80
+
81
+ def call(self, x: tf.Tensor, training=None):
82
+ if training:
83
+ keep_prob = 1 - self.drop_path
84
+ shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
85
+ random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
86
+ random_tensor = tf.floor(random_tensor)
87
+ return (x / keep_prob) * random_tensor
88
+ return x
89
+
90
+
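
`TFConvNextV2DropPath` above drops whole samples with probability `drop_path` and rescales the survivors by `1 / keep_prob`, so the expected activation is unchanged at training time. A small sketch of that scaling, assuming nothing beyond TensorFlow itself:

```python
# Hedged illustration of the stochastic-depth scaling used by TFConvNextV2DropPath above:
# dropping entire samples and dividing survivors by keep_prob preserves the mean activation.
import tensorflow as tf

drop_path = 0.2
keep_prob = 1.0 - drop_path

x = tf.ones((10000, 4))  # many "samples", each with a constant activation of 1.0
mask = tf.floor(keep_prob + tf.random.uniform((x.shape[0], 1), 0, 1))  # 1 keeps a sample, 0 drops it
y = (x / keep_prob) * mask

print(float(tf.reduce_mean(y)))  # close to 1.0: the expectation is preserved
```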
91
+ class TFConvNextV2GRN(keras.layers.Layer):
92
+ """GRN (Global Response Normalization) layer"""
93
+
94
+ def __init__(self, config: ConvNextV2Config, dim: int, **kwargs):
95
+ super().__init__(**kwargs)
96
+ self.dim = dim
97
+
98
+ def build(self, input_shape: tf.TensorShape = None):
99
+ # PT's `nn.Parameters` must be mapped to a TF layer weight to inherit the same name hierarchy (and vice-versa)
100
+ self.weight = self.add_weight(
101
+ name="weight",
102
+ shape=(1, 1, 1, self.dim),
103
+ initializer=keras.initializers.Zeros(),
104
+ )
105
+ self.bias = self.add_weight(
106
+ name="bias",
107
+ shape=(1, 1, 1, self.dim),
108
+ initializer=keras.initializers.Zeros(),
109
+ )
110
+ return super().build(input_shape)
111
+
112
+ def call(self, hidden_states: tf.Tensor):
113
+ global_features = tf.norm(hidden_states, ord="euclidean", axis=(1, 2), keepdims=True)
114
+ norm_features = global_features / (tf.reduce_mean(global_features, axis=-1, keepdims=True) + 1e-6)
115
+ hidden_states = self.weight * (hidden_states * norm_features) + self.bias + hidden_states
116
+ return hidden_states
117
+
118
+
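
The GRN layer above takes a per-channel L2 norm over the spatial dimensions, divides it by its mean across channels, and uses the result to rescale the input before the learnable affine terms and the residual connection. A step-by-step sketch of that computation on a random NHWC tensor, assuming the zero initialization used in `build()`:

```python
# Hedged sketch of the Global Response Normalization math in TFConvNextV2GRN above.
# weight and bias start at zero (as in build()), so the layer begins as an identity map.
import tensorflow as tf

x = tf.random.normal((2, 7, 7, 768))                          # (batch, H, W, channels)
weight = tf.zeros((1, 1, 1, 768))
bias = tf.zeros((1, 1, 1, 768))

gx = tf.norm(x, ord="euclidean", axis=(1, 2), keepdims=True)   # per-channel spatial L2 norm
nx = gx / (tf.reduce_mean(gx, axis=-1, keepdims=True) + 1e-6)  # normalize across channels
out = weight * (x * nx) + bias + x                             # affine rescale plus residual

print(bool(tf.reduce_all(out == x)))                           # True: GRN starts as a no-op
```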
119
+ # Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextEmbeddings with ConvNext->ConvNextV2
120
+ class TFConvNextV2Embeddings(keras.layers.Layer):
121
+ """This class is comparable to (and inspired by) the SwinEmbeddings class
122
+ found in src/transformers/models/swin/modeling_swin.py.
123
+ """
124
+
125
+ def __init__(self, config: ConvNextV2Config, **kwargs):
126
+ super().__init__(**kwargs)
127
+ self.patch_embeddings = keras.layers.Conv2D(
128
+ filters=config.hidden_sizes[0],
129
+ kernel_size=config.patch_size,
130
+ strides=config.patch_size,
131
+ name="patch_embeddings",
132
+ kernel_initializer=get_initializer(config.initializer_range),
133
+ bias_initializer=keras.initializers.Zeros(),
134
+ )
135
+ self.layernorm = keras.layers.LayerNormalization(epsilon=1e-6, name="layernorm")
136
+ self.num_channels = config.num_channels
137
+ self.config = config
138
+
139
+ def call(self, pixel_values):
140
+ if isinstance(pixel_values, dict):
141
+ pixel_values = pixel_values["pixel_values"]
142
+
143
+ tf.debugging.assert_equal(
144
+ shape_list(pixel_values)[1],
145
+ self.num_channels,
146
+ message="Make sure that the channel dimension of the pixel values match with the one set in the configuration.",
147
+ )
148
+
149
+ # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
150
+ # So change the input format from `NCHW` to `NHWC`.
151
+ # shape = (batch_size, in_height, in_width, in_channels)
152
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
153
+
154
+ embeddings = self.patch_embeddings(pixel_values)
155
+ embeddings = self.layernorm(embeddings)
156
+ return embeddings
157
+
158
+ def build(self, input_shape=None):
159
+ if self.built:
160
+ return
161
+ self.built = True
162
+ if getattr(self, "patch_embeddings", None) is not None:
163
+ with tf.name_scope(self.patch_embeddings.name):
164
+ self.patch_embeddings.build([None, None, None, self.config.num_channels])
165
+ if getattr(self, "layernorm", None) is not None:
166
+ with tf.name_scope(self.layernorm.name):
167
+ self.layernorm.build([None, None, None, self.config.hidden_sizes[0]])
168
+
169
+
170
+ class TFConvNextV2Layer(keras.layers.Layer):
171
+ """This corresponds to the `Block` class in the original implementation.
172
+
173
+ There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), 1x1 Conv, GELU, 1x1 Conv]; all in (N, C,
174
+ H, W); (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back
175
+
176
+ The authors used (2) as they find it slightly faster in PyTorch. Since we already permuted the inputs to follow
177
+ NHWC ordering, we can just apply the operations straight-away without the permutation.
178
+
179
+ Args:
180
+ config (`ConvNextV2Config`):
181
+ Model configuration class.
182
+ dim (`int`):
183
+ Number of input channels.
184
+ drop_path (`float`, defaults to 0.0):
185
+ Stochastic depth rate.
186
+ """
187
+
188
+ def __init__(self, config: ConvNextV2Config, dim: int, drop_path: float = 0.0, **kwargs):
189
+ super().__init__(**kwargs)
190
+ self.dim = dim
191
+ self.config = config
192
+ self.dwconv = keras.layers.Conv2D(
193
+ filters=dim,
194
+ kernel_size=7,
195
+ padding="same",
196
+ groups=dim,
197
+ kernel_initializer=get_initializer(config.initializer_range),
198
+ bias_initializer=keras.initializers.Zeros(),
199
+ name="dwconv",
200
+ ) # depthwise conv
201
+ self.layernorm = keras.layers.LayerNormalization(
202
+ epsilon=1e-6,
203
+ name="layernorm",
204
+ )
205
+ self.pwconv1 = keras.layers.Dense(
206
+ units=4 * dim,
207
+ kernel_initializer=get_initializer(config.initializer_range),
208
+ bias_initializer=keras.initializers.Zeros(),
209
+ name="pwconv1",
210
+ ) # pointwise/1x1 convs, implemented with linear layers
211
+ self.act = get_tf_activation(config.hidden_act)
212
+ self.grn = TFConvNextV2GRN(config, 4 * dim, dtype=tf.float32, name="grn")
213
+ self.pwconv2 = keras.layers.Dense(
214
+ units=dim,
215
+ kernel_initializer=get_initializer(config.initializer_range),
216
+ bias_initializer=keras.initializers.Zeros(),
217
+ name="pwconv2",
218
+ )
219
+ # Using `layers.Activation` instead of `tf.identity` to better control `training`
220
+ # behaviour.
221
+ self.drop_path = (
222
+ TFConvNextV2DropPath(drop_path, name="drop_path")
223
+ if drop_path > 0.0
224
+ else keras.layers.Activation("linear", name="drop_path")
225
+ )
226
+
227
+ def call(self, hidden_states, training=False):
228
+ input = hidden_states
229
+ x = self.dwconv(hidden_states)
230
+ x = self.layernorm(x)
231
+ x = self.pwconv1(x)
232
+ x = self.act(x)
233
+ x = self.grn(x)
234
+ x = self.pwconv2(x)
235
+ x = self.drop_path(x, training=training)
236
+ x = input + x
237
+ return x
238
+
239
+ def build(self, input_shape=None):
240
+ if self.built:
241
+ return
242
+ self.built = True
243
+ if getattr(self, "dwconv", None) is not None:
244
+ with tf.name_scope(self.dwconv.name):
245
+ self.dwconv.build([None, None, None, self.dim])
246
+ if getattr(self, "layernorm", None) is not None:
247
+ with tf.name_scope(self.layernorm.name):
248
+ self.layernorm.build([None, None, None, self.dim])
249
+ if getattr(self, "pwconv1", None) is not None:
250
+ with tf.name_scope(self.pwconv1.name):
251
+ self.pwconv1.build([None, None, self.dim])
252
+ if getattr(self, "grn", None) is not None:
253
+ with tf.name_scope(self.grn.name):
254
+ self.grn.build(None)
255
+ if getattr(self, "pwconv2", None) is not None:
256
+ with tf.name_scope(self.pwconv2.name):
257
+ self.pwconv2.build([None, None, 4 * self.dim])
258
+ if getattr(self, "drop_path", None) is not None:
259
+ with tf.name_scope(self.drop_path.name):
260
+ self.drop_path.build(None)
261
+
262
+
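
As the docstring notes, the block operates entirely in NHWC, expanding the channel dimension fourfold in the first pointwise layer and projecting back in the second before the residual addition. A shape trace through one block, using illustrative sizes (the import path is the module added in this commit):

```python
# Hedged shape trace through one TFConvNextV2Layer block as defined above,
# with illustrative dimensions (dim=96, 14x14 spatial, batch of 2).
import tensorflow as tf
from transformers import ConvNextV2Config
from transformers.models.convnextv2.modeling_tf_convnextv2 import TFConvNextV2Layer

config = ConvNextV2Config()
block = TFConvNextV2Layer(config, dim=96)

x = tf.random.normal((2, 14, 14, 96))  # NHWC input
y = block(x, training=False)

print(x.shape, "->", y.shape)          # residual block: output shape equals input shape
```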
263
+ # Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextStage with ConvNext->ConvNextV2
264
+ class TFConvNextV2Stage(keras.layers.Layer):
265
+ """ConvNextV2 stage, consisting of an optional downsampling layer + multiple residual blocks.
266
+
267
+ Args:
268
+ config (`ConvNextV2Config`):
269
+ Model configuration class.
270
+ in_channels (`int`):
271
+ Number of input channels.
272
+ out_channels (`int`):
273
+ Number of output channels.
274
+ depth (`int`):
275
+ Number of residual blocks.
276
+ drop_path_rates(`List[float]`):
277
+ Stochastic depth rates for each layer.
278
+ """
279
+
280
+ def __init__(
281
+ self,
282
+ config: ConvNextV2Config,
283
+ in_channels: int,
284
+ out_channels: int,
285
+ kernel_size: int = 2,
286
+ stride: int = 2,
287
+ depth: int = 2,
288
+ drop_path_rates: Optional[List[float]] = None,
289
+ **kwargs,
290
+ ):
291
+ super().__init__(**kwargs)
292
+ if in_channels != out_channels or stride > 1:
293
+ self.downsampling_layer = [
294
+ keras.layers.LayerNormalization(
295
+ epsilon=1e-6,
296
+ name="downsampling_layer.0",
297
+ ),
298
+ # Inputs to this layer will follow NHWC format since we
299
+ # transposed the inputs from NCHW to NHWC in the `TFConvNextV2Embeddings`
300
+ # layer. All the outputs throughout the model will be in NHWC
301
+ # from this point on until the output where we again change to
302
+ # NCHW.
303
+ keras.layers.Conv2D(
304
+ filters=out_channels,
305
+ kernel_size=kernel_size,
306
+ strides=stride,
307
+ kernel_initializer=get_initializer(config.initializer_range),
308
+ bias_initializer=keras.initializers.Zeros(),
309
+ name="downsampling_layer.1",
310
+ ),
311
+ ]
312
+ else:
313
+ self.downsampling_layer = [tf.identity]
314
+
315
+ drop_path_rates = drop_path_rates or [0.0] * depth
316
+ self.layers = [
317
+ TFConvNextV2Layer(
318
+ config,
319
+ dim=out_channels,
320
+ drop_path=drop_path_rates[j],
321
+ name=f"layers.{j}",
322
+ )
323
+ for j in range(depth)
324
+ ]
325
+ self.in_channels = in_channels
326
+ self.out_channels = out_channels
327
+ self.stride = stride
328
+
329
+ def call(self, hidden_states):
330
+ for layer in self.downsampling_layer:
331
+ hidden_states = layer(hidden_states)
332
+ for layer in self.layers:
333
+ hidden_states = layer(hidden_states)
334
+ return hidden_states
335
+
336
+ def build(self, input_shape=None):
337
+ if self.built:
338
+ return
339
+ self.built = True
340
+ if getattr(self, "layers", None) is not None:
341
+ for layer in self.layers:
342
+ with tf.name_scope(layer.name):
343
+ layer.build(None)
344
+ if self.in_channels != self.out_channels or self.stride > 1:
345
+ with tf.name_scope(self.downsampling_layer[0].name):
346
+ self.downsampling_layer[0].build([None, None, None, self.in_channels])
347
+ with tf.name_scope(self.downsampling_layer[1].name):
348
+ self.downsampling_layer[1].build([None, None, None, self.in_channels])
349
+
350
+
351
+ class TFConvNextV2Encoder(keras.layers.Layer):
352
+ def __init__(self, config: ConvNextV2Config, **kwargs):
353
+ super().__init__(**kwargs)
354
+ self.stages = []
355
+ drop_path_rates = tf.linspace(0.0, config.drop_path_rate, sum(config.depths))
356
+ drop_path_rates = tf.split(drop_path_rates, config.depths)
357
+ drop_path_rates = [x.numpy().tolist() for x in drop_path_rates]
358
+ prev_chs = config.hidden_sizes[0]
359
+ for i in range(config.num_stages):
360
+ out_chs = config.hidden_sizes[i]
361
+ stage = TFConvNextV2Stage(
362
+ config,
363
+ in_channels=prev_chs,
364
+ out_channels=out_chs,
365
+ stride=2 if i > 0 else 1,
366
+ depth=config.depths[i],
367
+ drop_path_rates=drop_path_rates[i],
368
+ name=f"stages.{i}",
369
+ )
370
+ self.stages.append(stage)
371
+ prev_chs = out_chs
372
+
373
+ def call(
374
+ self,
375
+ hidden_states: tf.Tensor,
376
+ output_hidden_states: Optional[bool] = False,
377
+ return_dict: Optional[bool] = True,
378
+ ) -> Union[Tuple, TFBaseModelOutputWithNoAttention]:
379
+ all_hidden_states = () if output_hidden_states else None
380
+
381
+ for i, layer_module in enumerate(self.stages):
382
+ if output_hidden_states:
383
+ all_hidden_states = all_hidden_states + (hidden_states,)
384
+
385
+ hidden_states = layer_module(hidden_states)
386
+
387
+ if output_hidden_states:
388
+ all_hidden_states = all_hidden_states + (hidden_states,)
389
+
390
+ if not return_dict:
391
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
392
+
393
+ return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
394
+
395
+ def build(self, input_shape=None):
396
+ for stage in self.stages:
397
+ with tf.name_scope(stage.name):
398
+ stage.build(None)
399
+
400
+
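
The encoder above spreads the configured `drop_path_rate` linearly over all blocks and then splits the schedule per stage. A standalone reproduction of that bookkeeping, using ConvNeXt-Tiny-style depths and an illustrative rate (both are assumptions):

```python
# Hedged reproduction of the per-stage stochastic-depth schedule built in TFConvNextV2Encoder above.
import tensorflow as tf

depths = [3, 3, 9, 3]        # illustrative stage depths
drop_path_rate = 0.1         # illustrative top-level rate

rates = tf.linspace(0.0, drop_path_rate, sum(depths))   # one rate per block, linearly increasing
per_stage = [r.numpy().tolist() for r in tf.split(rates, depths)]

for i, stage_rates in enumerate(per_stage):
    print(f"stage {i}: {[round(r, 3) for r in stage_rates]}")
```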
401
+ @keras_serializable
402
+ class TFConvNextV2MainLayer(keras.layers.Layer):
403
+ config_class = ConvNextV2Config
404
+
405
+ def __init__(self, config: ConvNextV2Config, **kwargs):
406
+ super().__init__(**kwargs)
407
+
408
+ self.config = config
409
+ self.embeddings = TFConvNextV2Embeddings(config, name="embeddings")
410
+ self.encoder = TFConvNextV2Encoder(config, name="encoder")
411
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
412
+ # We are setting the `data_format` like so because from here on we will revert to the
413
+ # NCHW output format
414
+ self.pooler = keras.layers.GlobalAvgPool2D(data_format="channels_last")
415
+
416
+ @unpack_inputs
417
+ def call(
418
+ self,
419
+ pixel_values: TFModelInputType | None = None,
420
+ output_hidden_states: Optional[bool] = None,
421
+ return_dict: Optional[bool] = None,
422
+ training: bool = False,
423
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
424
+ output_hidden_states = (
425
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
426
+ )
427
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
428
+
429
+ if pixel_values is None:
430
+ raise ValueError("You have to specify pixel_values")
431
+
432
+ embedding_output = self.embeddings(pixel_values, training=training)
433
+
434
+ encoder_outputs = self.encoder(
435
+ embedding_output,
436
+ output_hidden_states=output_hidden_states,
437
+ return_dict=return_dict,
438
+ training=training,
439
+ )
440
+
441
+ last_hidden_state = encoder_outputs[0]
442
+
443
+ # Change to the NCHW output format to have uniformity in the modules
444
+ pooled_output = self.pooler(last_hidden_state)
445
+ last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
446
+ pooled_output = self.layernorm(pooled_output)
447
+
448
+ # Change the other hidden state outputs to NCHW as well
449
+ if output_hidden_states:
450
+ hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
451
+
452
+ if not return_dict:
453
+ hidden_states = hidden_states if output_hidden_states else ()
454
+ return (last_hidden_state, pooled_output) + hidden_states
455
+
456
+ return TFBaseModelOutputWithPoolingAndNoAttention(
457
+ last_hidden_state=last_hidden_state,
458
+ pooler_output=pooled_output,
459
+ hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
460
+ )
461
+
462
+ def build(self, input_shape=None):
463
+ if self.built:
464
+ return
465
+ self.built = True
466
+ if getattr(self, "embeddings", None) is not None:
467
+ with tf.name_scope(self.embeddings.name):
468
+ self.embeddings.build(None)
469
+ if getattr(self, "encoder", None) is not None:
470
+ with tf.name_scope(self.encoder.name):
471
+ self.encoder.build(None)
472
+ if getattr(self, "layernorm", None) is not None:
473
+ with tf.name_scope(self.layernorm.name):
474
+ self.layernorm.build([None, self.config.hidden_sizes[-1]])
475
+
476
+
477
+ class TFConvNextV2PreTrainedModel(TFPreTrainedModel):
478
+ """
479
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
480
+ models.
481
+ """
482
+
483
+ config_class = ConvNextV2Config
484
+ base_model_prefix = "convnextv2"
485
+ main_input_name = "pixel_values"
486
+
487
+
488
+ CONVNEXTV2_START_DOCSTRING = r"""
489
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
490
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
491
+ etc.)
492
+
493
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
494
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
495
+ behavior.
496
+
497
+ <Tip>
498
+
499
+ TensorFlow models and layers in `transformers` accept two formats as input:
500
+
501
+ - having all inputs as keyword arguments (like PyTorch models), or
502
+ - having all inputs as a list, tuple or dict in the first positional argument.
503
+
504
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
505
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
506
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
507
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
508
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
509
+ positional argument:
510
+
511
+ - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`
512
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
513
+ `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`
514
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
515
+ `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`
516
+
517
+ Note that when creating models and layers with
518
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
519
+ about any of this, as you can just pass inputs like you would to any other Python function!
520
+
521
+ </Tip>
522
+
523
+ Parameters:
524
+ config ([`ConvNextV2Config`]): Model configuration class with all the parameters of the model.
525
+ Initializing with a config file does not load the weights associated with the model, only the
526
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
527
+ """
528
+
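
A compact sketch of the three calling conventions described in the tip above, assuming the `facebook/convnextv2-tiny-1k-224` checkpoint used elsewhere in this file; any TF ConvNextV2 checkpoint should behave the same way:

```python
# Hedged illustration of the three input formats described in CONVNEXTV2_START_DOCSTRING above.
import tensorflow as tf
from transformers import TFConvNextV2Model

model = TFConvNextV2Model.from_pretrained("facebook/convnextv2-tiny-1k-224")
pixel_values = tf.random.uniform((1, 3, 224, 224))   # NCHW, as documented below

out1 = model(pixel_values)                            # single tensor
out2 = model([pixel_values])                          # list in the first positional argument
out3 = model({"pixel_values": pixel_values})          # dict keyed by input name

print(out1.pooler_output.shape, out2.pooler_output.shape, out3.pooler_output.shape)
```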
529
+ CONVNEXTV2_INPUTS_DOCSTRING = r"""
530
+ Args:
531
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
532
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
533
+ [`ConvNextImageProcessor.__call__`] for details.
534
+
535
+ output_hidden_states (`bool`, *optional*):
536
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
537
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
538
+ used instead.
539
+ return_dict (`bool`, *optional*):
540
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
541
+ eager mode, in graph mode the value will always be set to `True`.
542
+ """
543
+
544
+
545
+ @add_start_docstrings(
546
+ "The bare ConvNextV2 model outputting raw features without any specific head on top.",
547
+ CONVNEXTV2_START_DOCSTRING,
548
+ )
549
+ class TFConvNextV2Model(TFConvNextV2PreTrainedModel):
550
+ def __init__(self, config: ConvNextV2Config, *inputs, **kwargs):
551
+ super().__init__(config, *inputs, **kwargs)
552
+ self.convnextv2 = TFConvNextV2MainLayer(config, name="convnextv2")
553
+
554
+ @unpack_inputs
555
+ @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
556
+ @add_code_sample_docstrings(
557
+ checkpoint=_CHECKPOINT_FOR_DOC,
558
+ output_type=TFBaseModelOutputWithPoolingAndNoAttention,
559
+ config_class=_CONFIG_FOR_DOC,
560
+ modality="vision",
561
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
562
+ )
563
+ def call(
564
+ self,
565
+ pixel_values: TFModelInputType | None = None,
566
+ output_hidden_states: Optional[bool] = None,
567
+ return_dict: Optional[bool] = None,
568
+ training: bool = False,
569
+ ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
570
+ output_hidden_states = (
571
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
572
+ )
573
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
574
+
575
+ if pixel_values is None:
576
+ raise ValueError("You have to specify pixel_values")
577
+
578
+ outputs = self.convnextv2(
579
+ pixel_values=pixel_values,
580
+ output_hidden_states=output_hidden_states,
581
+ return_dict=return_dict,
582
+ training=training,
583
+ )
584
+
585
+ if not return_dict:
586
+ return outputs[:]
587
+
588
+ return TFBaseModelOutputWithPoolingAndNoAttention(
589
+ last_hidden_state=outputs.last_hidden_state,
590
+ pooler_output=outputs.pooler_output,
591
+ hidden_states=outputs.hidden_states,
592
+ )
593
+
594
+ def build(self, input_shape=None):
595
+ if self.built:
596
+ return
597
+ self.built = True
598
+ if getattr(self, "convnextv2", None) is not None:
599
+ with tf.name_scope(self.convnextv2.name):
600
+ self.convnextv2.build(None)
601
+
602
+
603
+ @add_start_docstrings(
604
+ """
605
+ ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
606
+ ImageNet.
607
+ """,
608
+ CONVNEXTV2_START_DOCSTRING,
609
+ )
610
+ class TFConvNextV2ForImageClassification(TFConvNextV2PreTrainedModel, TFSequenceClassificationLoss):
611
+ def __init__(self, config: ConvNextV2Config, *inputs, **kwargs):
612
+ super().__init__(config, *inputs, **kwargs)
613
+
614
+ self.num_labels = config.num_labels
615
+ self.convnextv2 = TFConvNextV2MainLayer(config, name="convnextv2")
616
+
617
+ # Classifier head
618
+ self.classifier = keras.layers.Dense(
619
+ units=config.num_labels,
620
+ kernel_initializer=get_initializer(config.initializer_range),
621
+ bias_initializer=keras.initializers.Zeros(),
622
+ name="classifier",
623
+ )
624
+
625
+ @unpack_inputs
626
+ @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
627
+ @add_code_sample_docstrings(
628
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
629
+ output_type=TFImageClassifierOutputWithNoAttention,
630
+ config_class=_CONFIG_FOR_DOC,
631
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
632
+ )
633
+ def call(
634
+ self,
635
+ pixel_values: TFModelInputType | None = None,
636
+ output_hidden_states: Optional[bool] = None,
637
+ return_dict: Optional[bool] = None,
638
+ labels: np.ndarray | tf.Tensor | None = None,
639
+ training: Optional[bool] = False,
640
+ ) -> Union[TFImageClassifierOutputWithNoAttention, Tuple[tf.Tensor]]:
641
+ r"""
642
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
643
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
644
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
645
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
646
+ """
647
+ output_hidden_states = (
648
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
649
+ )
650
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
651
+
652
+ if pixel_values is None:
653
+ raise ValueError("You have to specify pixel_values")
654
+
655
+ outputs = self.convnextv2(
656
+ pixel_values,
657
+ output_hidden_states=output_hidden_states,
658
+ return_dict=return_dict,
659
+ training=training,
660
+ )
661
+
662
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
663
+
664
+ logits = self.classifier(pooled_output)
665
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
666
+
667
+ if not return_dict:
668
+ output = (logits,) + outputs[2:]
669
+ return ((loss,) + output) if loss is not None else output
670
+
671
+ return TFImageClassifierOutputWithNoAttention(
672
+ loss=loss,
673
+ logits=logits,
674
+ hidden_states=outputs.hidden_states,
675
+ )
676
+
677
+ def build(self, input_shape=None):
678
+ if self.built:
679
+ return
680
+ self.built = True
681
+ if getattr(self, "convnextv2", None) is not None:
682
+ with tf.name_scope(self.convnextv2.name):
683
+ self.convnextv2.build(None)
684
+ if getattr(self, "classifier", None) is not None:
685
+ with tf.name_scope(self.classifier.name):
686
+ self.classifier.build([None, None, self.config.hidden_sizes[-1]])
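
An end-to-end sketch of the classification head defined above, mirroring the library's usual doc examples; the image URL is only an example input and the checkpoint matches `_IMAGE_CLASS_CHECKPOINT`:

```python
# Hedged usage sketch for TFConvNextV2ForImageClassification as defined above.
import tensorflow as tf
import requests
from PIL import Image
from transformers import AutoImageProcessor, TFConvNextV2ForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"   # example image only
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = TFConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")

inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits

predicted_class = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_class])   # e.g. "tabby, tabby cat", per the docstring constant
```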
env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/configuration_distilbert.cpython-310.pyc ADDED
Binary file (6.39 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_distilbert.cpython-310.pyc ADDED
Binary file (41.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_flax_distilbert.cpython-310.pyc ADDED
Binary file (22.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_tf_distilbert.cpython-310.pyc ADDED
Binary file (35.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert_fast.cpython-310.pyc ADDED
Binary file (8.42 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/configuration_ernie.cpython-310.pyc ADDED
Binary file (7.92 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__init__.py ADDED
@@ -0,0 +1,43 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
18
+
19
+
20
+ _import_structure = {}
21
+
22
+ try:
23
+ if not is_sentencepiece_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
29
+
30
+
31
+ if TYPE_CHECKING:
32
+ try:
33
+ if not is_sentencepiece_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ from .tokenization_gpt_sw3 import GPTSw3Tokenizer
39
+
40
+ else:
41
+ import sys
42
+
43
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (692 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/convert_megatron_to_pytorch.cpython-310.pyc ADDED
Binary file (5.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/tokenization_gpt_sw3.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/convert_megatron_to_pytorch.py ADDED
@@ -0,0 +1,197 @@
1
+ # Copyright 2022 The HuggingFace Inc. team and the AI-Sweden team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """ Convert GPT-SW3 megatron checkpoints to pytorch"""
15
+
16
+ import argparse
17
+ import os
18
+ from os.path import isfile
19
+
20
+ import torch
21
+
22
+ from transformers import GPT2Config
23
+
24
+
25
+ def recursive_print(name, val, spaces=0):
26
+ # Format the message.
27
+ if name is None:
28
+ msg = None
29
+ else:
30
+ fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
31
+ msg = fmt.format(name)
32
+
33
+ # Print and recurse (if needed).
34
+ if isinstance(val, dict):
35
+ if msg is not None:
36
+ print(msg)
37
+ for k in val.keys():
38
+ recursive_print(k, val[k], spaces + 2)
39
+ elif isinstance(val, torch.Tensor):
40
+ print(msg, ":", val.size())
41
+ else:
42
+ print(msg, ":", val)
43
+
44
+
45
+ def fix_query_key_value_ordering(param, num_splits, num_heads, hidden_size):
46
+ # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
47
+ # for compatibility with later versions of NVIDIA Megatron-LM.
48
+ # The inverse operation is performed inside Megatron-LM to read checkpoints:
49
+ # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
50
+ # If param is the weight tensor of the self-attention block, the returned tensor
51
+ # will have to be transposed one more time to be read by HuggingFace GPT2.
52
+ input_shape = param.size()
53
+ # other versions store [num_heads * num_splits * hidden_size, :]
54
+ saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
55
+ param = param.view(*saved_shape)
56
+ param = param.transpose(0, 1).contiguous()
57
+ param = param.view(*input_shape)
58
+ return param
59
+
60
+
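
The comment above describes a pure reshape-and-transpose: a fused QKV tensor saved as `(num_heads, num_splits, head_size, ...)` is reordered so the split (Q/K/V) axis comes first, then flattened back to its original 2-D shape. A toy check with small, made-up sizes:

```python
# Hedged toy check of the permutation performed by fix_query_key_value_ordering above.
import torch

num_heads, num_splits, head_size, hidden = 2, 3, 4, 8
param = torch.arange(num_heads * num_splits * head_size * hidden, dtype=torch.float32)
param = param.view(num_heads * num_splits * head_size, hidden)

input_shape = param.size()
saved_shape = (num_heads, num_splits, head_size) + input_shape[1:]
reordered = param.view(*saved_shape).transpose(0, 1).contiguous().view(*input_shape)

print(input_shape, "->", reordered.shape)   # shape is unchanged; only the row ordering differs
```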
61
+ def convert_megatron_checkpoint(sd_megatron, config):
62
+ """
63
+ Converts a Megatron checkpoint to a HuggingFace GPT-SW3 checkpoint.
64
+ """
65
+ n_positions = config.n_positions
66
+ layers = config.n_layer
67
+ vocab_size = config.vocab_size
68
+ heads = config.n_head
69
+ hidden_size_per_head = config.n_embd // config.n_head
70
+
71
+ word_embeddings = sd_megatron["model.language_model.embedding.word_embeddings.weight"][:vocab_size, :]
72
+ sd_hf = {
73
+ "transformer.wte.weight": word_embeddings,
74
+ "transformer.wpe.weight": sd_megatron["model.language_model.embedding.position_embeddings.weight"],
75
+ "transformer.ln_f.weight": sd_megatron["model.language_model.encoder.final_layernorm.weight"],
76
+ "transformer.ln_f.bias": sd_megatron["model.language_model.encoder.final_layernorm.bias"],
77
+ }
78
+
79
+ pf = "model.language_model.encoder.layers."
80
+ for i in range(layers):
81
+ causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.bool))
82
+ causal_mask = causal_mask.view(1, 1, n_positions, n_positions)
83
+ sd_hf[f"transformer.h.{i}.attn.bias"] = causal_mask
84
+ sd_hf[f"transformer.h.{i}.attn.masked_bias"] = torch.tensor(-1e4, dtype=torch.bfloat16)
85
+
86
+ sd_hf[f"transformer.h.{i}.ln_1.weight"] = sd_megatron[f"{pf}{i}.input_layernorm.weight"]
87
+ sd_hf[f"transformer.h.{i}.ln_1.bias"] = sd_megatron[f"{pf}{i}.input_layernorm.bias"]
88
+
89
+ val1 = sd_megatron[f"{pf}{i}.self_attention.query_key_value.weight"]
90
+ val1 = fix_query_key_value_ordering(val1, 3, heads, hidden_size_per_head)
91
+ sd_hf[f"transformer.h.{i}.attn.c_attn.weight"] = val1.transpose(0, 1).contiguous()
92
+
93
+ val2 = sd_megatron[f"{pf}{i}.self_attention.query_key_value.bias"]
94
+ val2 = fix_query_key_value_ordering(val2, 3, heads, hidden_size_per_head)
95
+ sd_hf[f"transformer.h.{i}.attn.c_attn.bias"] = val2
96
+
97
+ sd_hf[f"transformer.h.{i}.attn.c_proj.weight"] = sd_megatron[f"{pf}{i}.self_attention.dense.weight"].transpose(
98
+ 0, 1
99
+ )
100
+ sd_hf[f"transformer.h.{i}.attn.c_proj.bias"] = sd_megatron[f"{pf}{i}.self_attention.dense.bias"]
101
+ sd_hf[f"transformer.h.{i}.ln_2.weight"] = sd_megatron[f"{pf}{i}.post_attention_layernorm.weight"]
102
+ sd_hf[f"transformer.h.{i}.ln_2.bias"] = sd_megatron[f"{pf}{i}.post_attention_layernorm.bias"]
103
+ sd_hf[f"transformer.h.{i}.mlp.c_fc.weight"] = sd_megatron[f"{pf}{i}.mlp.dense_h_to_4h.weight"].transpose(0, 1)
104
+ sd_hf[f"transformer.h.{i}.mlp.c_fc.bias"] = sd_megatron[f"{pf}{i}.mlp.dense_h_to_4h.bias"]
105
+ sd_hf[f"transformer.h.{i}.mlp.c_proj.weight"] = sd_megatron[f"{pf}{i}.mlp.dense_4h_to_h.weight"].transpose(
106
+ 0, 1
107
+ )
108
+ sd_hf[f"transformer.h.{i}.mlp.c_proj.bias"] = sd_megatron[f"{pf}{i}.mlp.dense_4h_to_h.bias"]
109
+
110
+ # For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
111
+ sd_hf["lm_head.weight"] = word_embeddings
112
+
113
+ return sd_hf
114
+
115
+
116
+ def copy_config(config_hf, config_megatron):
117
+ """Copy the config from Megatron to hf."""
118
+ config_hf.vocab_size = 64000
119
+ config_hf.n_positions = config_megatron["encoder_seq_length"]
120
+ config_hf.n_embd = config_megatron["hidden_size"]
121
+ config_hf.n_layer = config_megatron["num_layers"]
122
+ config_hf.n_head = config_megatron["num_attention_heads"]
123
+ config_hf.n_inner = config_megatron["ffn_hidden_size"]
124
+ config_hf.activation_function = "gelu"
125
+ config_hf.resid_pdrop = 0.1
126
+ config_hf.embd_pdrop = 0.1
127
+ config_hf.attn_pdrop = 0.1
128
+ config_hf.layer_norm_epsilon = config_megatron["layernorm_epsilon"] # 1e-5
129
+ config_hf.initializer_range = config_megatron["init_method_std"] # 0.02
130
+ config_hf.apply_query_key_layer_scaling = config_megatron["apply_query_key_layer_scaling"] # True
131
+ config_hf.normalize_attention_scores = True
132
+ config_hf.use_cache = True
133
+
134
+ # This identifies the 6.7B (7B) model which uses a different tokenizer
135
+ if config_megatron["hidden_size"] == 4096:
136
+ config_hf.bos_token_id = 1 # <|endoftext|>
137
+ config_hf.eos_token_id = 1 # <|endoftext|>
138
+ config_hf.pad_token_id = 0 # <unk>
139
+ else:
140
+ config_hf.bos_token_id = 2 # <s>
141
+ config_hf.eos_token_id = 3 # <|endoftext|>
142
+ config_hf.pad_token_id = 0 # <pad>
143
+
144
+ return config_hf
145
+
146
+
147
+ def main(args):
148
+ print(args)
149
+
150
+ checkpoint_path = args.checkpoint_path
151
+ save_path = args.save_path
152
+ if not isfile(checkpoint_path):
153
+ raise FileNotFoundError(f"ERROR! could not find file {checkpoint_path}")
154
+
155
+ # Load the model.
156
+ checkpoint = torch.load(checkpoint_path, map_location="cpu")
157
+
158
+ # Load the config.
159
+ config_megatron = checkpoint["hyper_parameters"]["cfg"]
160
+ config_hf = GPT2Config()
161
+ config_hf = copy_config(config_hf=config_hf, config_megatron=config_megatron)
162
+ config_hf.architectures = ["GPT2LMHeadModel"]
163
+
164
+ sd_megatron = checkpoint["state_dict"]
165
+
166
+ # Convert.
167
+ print("Converting")
168
+ sd_hf = convert_megatron_checkpoint(sd_megatron, config_hf)
169
+
170
+ # Print the structure of converted state dict.
171
+ if args.print_checkpoint_structure:
172
+ recursive_print(None, sd_hf)
173
+
174
+ config_hf.tokenizer_class = "GPTSw3Tokenizer"
175
+
176
+ # Store the config to file.
177
+ print("Saving config")
178
+ config_hf.save_pretrained(save_path)
179
+
180
+ # Store the state_dict to file.
181
+ output_checkpoint_file = os.path.join(save_path, "pytorch_model.bin")
182
+ print(f'Saving checkpoint to "{output_checkpoint_file}"')
183
+ torch.save(sd_hf, output_checkpoint_file)
184
+
185
+
186
+ if __name__ == "__main__":
187
+ parser = argparse.ArgumentParser()
188
+ parser.add_argument(
189
+ "--checkpoint_path",
190
+ type=str,
191
+ required=True,
192
+ help="e.g. megatron_gpt--val_loss=2.42-step=38000-consumed_samples=54720000",
193
+ )
194
+ parser.add_argument("--save_path", type=str, required=True, help="e.g. /home/user/gpt-sw3/hf")
195
+ parser.add_argument("--print-checkpoint-structure", action="store_true")
196
+ _args = parser.parse_args()
197
+ main(_args)
env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/tokenization_gpt_sw3.py ADDED
@@ -0,0 +1,342 @@
1
+ """The tokenizer used by the GPT-SW3 models."""
2
+
3
+ import os
4
+ import re
5
+ import unicodedata
6
+ from shutil import copyfile
7
+ from typing import Any, Dict, List, Optional, Tuple, Union
8
+
9
+ import sentencepiece as spm
10
+
11
+ from ...tokenization_utils import PreTrainedTokenizer
12
+ from ...utils import is_torch_available, logging
13
+
14
+
15
+ if is_torch_available():
16
+ import torch
17
+
18
+
19
+ logger = logging.get_logger(__name__)
20
+ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
21
+
22
+ PRETRAINED_VOCAB_FILES_MAP = {
23
+ "vocab_file": {
24
+ "AI-Sweden-Models/gpt-sw3-126m": "https://huggingface.co/AI-Sweden-Models/gpt-sw3-126m/resolve/main/spiece.model",
25
+ "AI-Sweden-Models/gpt-sw3-356m": "https://huggingface.co/AI-Sweden-Models/gpt-sw3-356m/resolve/main/spiece.model",
26
+ "AI-Sweden-Models/gpt-sw3-1.3b": "https://huggingface.co/AI-Sweden-Models/gpt-sw3-1.3b/resolve/main/spiece.model",
27
+ "AI-Sweden-Models/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden-Models/gpt-sw3-6.7b/resolve/main/spiece.model",
28
+ "AI-Sweden-Models/gpt-sw3-6.7b-v2": "https://huggingface.co/AI-Sweden-Models/gpt-sw3-6.7b-v2/resolve/main/spiece.model",
29
+ "AI-Sweden-Models/gpt-sw3-20b": "https://huggingface.co/AI-Sweden-Models/gpt-sw3-20b/resolve/main/spiece.model",
30
+ "AI-Sweden-Models/gpt-sw3-40b": "https://huggingface.co/AI-Sweden-Models/gpt-sw3-20b/resolve/main/spiece.model",
31
+ }
32
+ }
33
+
34
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
35
+ "AI-Sweden-Models/gpt-sw3-126m": 2048,
36
+ "AI-Sweden-Models/gpt-sw3-356m": 2048,
37
+ "AI-Sweden-Models/gpt-sw3-1.3b": 2048,
38
+ "AI-Sweden-Models/gpt-sw3-6.7b": 2048,
39
+ "AI-Sweden-Models/gpt-sw3-6.7b-v2": 2048,
40
+ "AI-Sweden-Models/gpt-sw3-20b": 2048,
41
+ "AI-Sweden-Models/gpt-sw3-40b": 2048,
42
+ }
43
+
44
+
45
+ class GPTSw3Tokenizer(PreTrainedTokenizer):
46
+ """
47
+ Construct a GPTSw3 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
48
+
49
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
50
+ this superclass for more information regarding those methods.
51
+
52
+ Example usage:
53
+ ```python
54
+ >>> from transformers import GPTSw3Tokenizer
55
+
56
+ >>> tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-126m")
57
+ >>> tokenizer("Svenska är kul!")["input_ids"]
58
+ [1814, 377, 3617, 63504]
59
+ ```
60
+
61
+ Args:
62
+ vocab_file (`str`):
63
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
64
+ contains the vocabulary necessary to instantiate a tokenizer.
65
+ do_lower_case (`bool`, *optional*, defaults to `False`):
66
+ Whether or not to lowercase the input when tokenizing.
67
+ remove_space (`bool`, *optional*, defaults to `False`):
68
+ Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
69
+ keep_accents (`bool`, *optional*, defaults to `False`):
70
+ Whether or not to keep accents when tokenizing.
71
+ pad_token (`str`, *optional*):
72
+ The token used for padding, for example when batching sequences of different lengths. If not provided, will
73
+ default to '<pad>' or '<unk>' depending on model size.
74
+ unk_token (`str`, *optional*):
75
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
76
+ token instead. If not provided, will default to '<unk>'.
77
+ eos_token (`str`, *optional*):
78
+ The end of sequence token seen during pretraining. If not provided, will default to '<|endoftext|>'
79
+ bos_token (`str`, *optional*):
80
+ The beginning of sequence token that can be used for downstream task, was not seen during pretraining. If
81
+ not provided, will default to '<s>' or '<|endoftext|>', depending on model size.
82
+ sp_model_kwargs (`dict`, *optional*):
83
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
84
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
85
+ to set:
86
+
87
+ - `enable_sampling`: Enable subword regularization.
88
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
89
+
90
+ - `nbest_size = {0,1}`: No sampling is performed.
91
+ - `nbest_size > 1`: samples from the nbest_size results.
92
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
93
+ using forward-filtering-and-backward-sampling algorithm.
94
+
95
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
96
+ BPE-dropout.
97
+
98
+ Attributes:
99
+ sp_model (`SentencePieceProcessor`):
100
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
101
+ whitespaces (`set`):
102
+ The whitespaces that are replaced in the whitespace normalization in preprocessing.
103
+ non_printing_characters_re (`Pattern`):
104
+ The compiled regular expression to remove non-printing characters in preprocessing.
105
+ """
106
+
107
+ vocab_files_names = VOCAB_FILES_NAMES
108
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
109
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
110
+ model_input_names = ["input_ids", "attention_mask"]
111
+
112
+ def __init__(
113
+ self,
114
+ vocab_file,
115
+ do_lower_case=False,
116
+ remove_space=False,
117
+ keep_accents=False,
118
+ pad_token=None,
119
+ unk_token=None,
120
+ eos_token=None,
121
+ bos_token=None,
122
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
123
+ **kwargs,
124
+ ) -> None:
125
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
126
+
127
+ name_or_path = kwargs.get("name_or_path")
128
+ if name_or_path is None:
129
+ logger.warning(
130
+ "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
131
+ " you are testing the model, this can safely be ignored"
132
+ )
133
+ name_or_path = "None"
134
+
135
+ # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
136
+ eos_token = "<|endoftext|>" if eos_token is None else eos_token
137
+ unk_token = "<unk>" if unk_token is None else unk_token
138
+ if "gpt-sw3-7b" in name_or_path:
139
+ pad_token = unk_token if pad_token is None else pad_token
140
+ bos_token = eos_token if bos_token is None else bos_token
141
+ else:
142
+ pad_token = "<pad>" if pad_token is None else pad_token
143
+ bos_token = "<s>" if bos_token is None else bos_token
144
+
145
+ self.do_lower_case = do_lower_case
146
+ self.remove_space = remove_space
147
+ self.keep_accents = keep_accents
148
+ self.vocab_file = vocab_file
149
+
150
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
151
+ self.sp_model.Load(vocab_file)
152
+
153
+ # Used for whitespace normalization in input texts
154
+ # fmt: off
155
+ self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", "„"}
156
+ # fmt: on
157
+
158
+ # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
159
+ self.non_printing_characters_re = re.compile(
160
+ f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
161
+ )
162
+
163
+ super().__init__(
164
+ do_lower_case=do_lower_case,
165
+ remove_space=remove_space,
166
+ keep_accents=keep_accents,
167
+ bos_token=bos_token,
168
+ eos_token=eos_token,
169
+ unk_token=unk_token,
170
+ pad_token=pad_token,
171
+ sp_model_kwargs=self.sp_model_kwargs,
172
+ **kwargs,
173
+ )
174
+
175
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.__getstate__
176
+ def __getstate__(self):
177
+ state = self.__dict__.copy()
178
+ state["sp_model"] = None
179
+ return state
180
+
181
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.__setstate__
182
+ def __setstate__(self, d):
183
+ self.__dict__ = d
184
+
185
+ # for backward compatibility
186
+ if not hasattr(self, "sp_model_kwargs"):
187
+ self.sp_model_kwargs = {}
188
+
189
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
190
+ self.sp_model.Load(self.vocab_file)
191
+
192
+ @property
193
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
194
+ def vocab_size(self) -> int:
195
+ return len(self.sp_model)
196
+
197
+ def preprocess_text(self, text: str) -> str:
198
+ """
199
+ Returns the preprocessed text. This procedure is identical to what was used when training the tokenizer.
200
+ """
201
+
202
+ # Remove non-printing characters
203
+ text = self.non_printing_characters_re.sub("", text)
204
+
205
+ # Normalize whitespaces
206
+ text = "".join([char if char not in self.whitespaces else " " for char in text])
207
+
208
+ # NFC Unicode normalization
209
+ text = unicodedata.normalize("NFC", text)
210
+ return text
211
+
212
+ def _tokenize(self, text: str, **kwargs) -> List[str]:
213
+ text = self.preprocess_text(text)
214
+ return self.sp_model.encode(text, out_type=str)
215
+
216
+ def _convert_token_to_id(self, token: str) -> int:
217
+ """Converts a token (str) to an id (int) using the vocab."""
218
+ return self.sp_model.PieceToId(token)
219
+
220
+ def _convert_id_to_token(self, index: int) -> str:
221
+ """Converts an index (int) to a token (str) using the vocab."""
222
+ return self.sp_model.IdToPiece(index)
223
+
224
+ @staticmethod
225
+ def clean_up_tokenization(out_string: str) -> str:
226
+ """Returns the input string, this function is overridden to remove the default clean up."""
227
+ return out_string
228
+
229
+ def convert_tokens_to_string(self, tokens: List[str]) -> str:
230
+ """Converts a sequence of tokens (strings) to a single string. Special tokens remain intact."""
231
+ current_sub_tokens = []
232
+ out_string = ""
233
+ prev_is_special = False
234
+ for token in tokens:
235
+ # make sure that special tokens are not decoded using sentencepiece model
236
+ if token in self.all_special_tokens:
237
+ # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
238
+ if not prev_is_special:
239
+ out_string += " "
240
+
241
+ out_string += self.sp_model.decode(current_sub_tokens) + token
242
+ prev_is_special = True
243
+ current_sub_tokens = []
244
+ else:
245
+ current_sub_tokens.append(token)
246
+ prev_is_special = False
247
+ out_string += self.sp_model.decode(current_sub_tokens)
248
+
249
+ return out_string
250
+
251
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.get_vocab
252
+ def get_vocab(self) -> Dict[str, int]:
253
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
254
+ vocab.update(self.added_tokens_encoder)
255
+ return vocab
256
+
257
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.save_vocabulary
258
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
259
+ if not os.path.isdir(save_directory):
260
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
261
+ return
262
+ out_vocab_file = os.path.join(
263
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
264
+ )
265
+
266
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
267
+ copyfile(self.vocab_file, out_vocab_file)
268
+ elif not os.path.isfile(self.vocab_file):
269
+ with open(out_vocab_file, "wb") as fi:
270
+ content_spiece_model = self.sp_model.serialized_model_proto()
271
+ fi.write(content_spiece_model)
272
+
273
+ return (out_vocab_file,)
274
+
275
+ def encode_fast(
276
+ self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
277
+ ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
278
+ """
279
+ Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer. This has reduced
280
+ functionality but is often much faster.
281
+
282
+ Does NOT handle special tokens correctly, these can manually be added as ids afterwards.
283
+
284
+ Does NOT support padding, these can manually be added as ids afterwards.
285
+
286
+ Use default HuggingFace tokenization methods for full functionality.
287
+
288
+ Args:
289
+ text (`str` or `List[str]`): One or several text(s) to convert to token ids.
290
+ return_tensors (`str` or `bool`): Returns PyTorch tensors if set to True or "pt"
291
+
292
+ Returns:
293
+ `List[int]`, `List[List[int]]`, or `torch.Tensor`: The encoded text(s) as token ids.
294
+ """
295
+
296
+ if isinstance(text, str):
297
+ text = self.preprocess_text(text)
298
+ token_ids = self.sp_model.encode(text)
299
+ else:
300
+ text = [self.preprocess_text(t) for t in text]
301
+ token_ids = self.sp_model.encode(text)
302
+
303
+ if return_tensors is True or return_tensors == "pt":
304
+ token_ids = torch.tensor(token_ids)
305
+
306
+ return token_ids
307
+
308
+ def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
309
+ """
310
+ Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer. This has reduced
311
+ functionality but is often much faster.
312
+
313
+ Args:
314
+ token_ids (`int` or `List[int]`): Encoded token or text as token id(s).
315
+
316
+ Returns:
317
+ `str`: Decoded text
318
+ """
319
+
320
+ return self.sp_model.decode(token_ids)
321
+
322
+ @property
323
+ def default_chat_template(self):
324
+ """
325
+ This chat template formats messages like an instant messenger chat log, with "User:" and "Bot:" strings
326
+ preceding messages. BOS tokens are added between all messages.
327
+ """
328
+ logger.warning_once(
329
+ "\nNo chat template is defined for this tokenizer - using the default template "
330
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
331
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
332
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
333
+ )
334
+ return (
335
+ "{{ eos_token }}{{ bos_token }}"
336
+ "{% for message in messages %}"
337
+ "{% if message['role'] == 'user' %}{{ 'User: ' + message['content']}}"
338
+ "{% else %}{{ 'Bot: ' + message['content']}}{% endif %}"
339
+ "{{ message['text'] }}{{ bos_token }}"
340
+ "{% endfor %}"
341
+ "Bot:"
342
+ )
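
A round-trip sketch for the fast helpers defined above; the checkpoint name comes from the class docstring, and note that `encode_fast` skips special-token handling and padding:

```python
# Hedged round-trip sketch for GPTSw3Tokenizer.encode_fast / decode_fast as defined above.
from transformers import GPTSw3Tokenizer

tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-126m")

ids = tokenizer.encode_fast("Svenska är kul!")    # plain List[int], preprocessing only
text = tokenizer.decode_fast(ids)                 # raw SentencePiece decode

print(ids)
print(text)

# return_tensors="pt" wraps the ids in a torch.Tensor instead (requires torch to be installed).
pt_ids = tokenizer.encode_fast("Svenska är kul!", return_tensors="pt")
print(pt_ids.shape)
```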
env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__init__.py ADDED
@@ -0,0 +1,70 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_tf_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_gptsan_japanese": ["GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTSanJapaneseConfig"],
28
+ "tokenization_gptsan_japanese": ["GPTSanJapaneseTokenizer"],
29
+ }
30
+
31
+ try:
32
+ if not is_torch_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["modeling_gptsan_japanese"] = [
38
+ "GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
39
+ "GPTSanJapaneseForConditionalGeneration",
40
+ "GPTSanJapaneseModel",
41
+ "GPTSanJapanesePreTrainedModel",
42
+ ]
43
+ _import_structure["tokenization_gptsan_japanese"] = [
44
+ "GPTSanJapaneseTokenizer",
45
+ ]
46
+
47
+
48
+ if TYPE_CHECKING:
49
+ from .configuration_gptsan_japanese import GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTSanJapaneseConfig
50
+ from .tokenization_gptsan_japanese import GPTSanJapaneseTokenizer
51
+
52
+ try:
53
+ if not is_torch_available():
54
+ raise OptionalDependencyNotAvailable()
55
+ except OptionalDependencyNotAvailable:
56
+ pass
57
+ else:
58
+ from .modeling_gptsan_japanese import (
59
+ GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
60
+ GPTSanJapaneseForConditionalGeneration,
61
+ GPTSanJapaneseModel,
62
+ GPTSanJapanesePreTrainedModel,
63
+ )
64
+ from .tokenization_gptsan_japanese import GPTSanJapaneseTokenizer
65
+
66
+
67
+ else:
68
+ import sys
69
+
70
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/tokenization_gptsan_japanese.cpython-310.pyc ADDED
Binary file (20.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py ADDED
@@ -0,0 +1,159 @@
1
+ # coding=utf-8
2
+ # Copyright 2023, HuggingFace Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ GPTSAN-japanese model configuration"""
16
+ from ...configuration_utils import PretrainedConfig
17
+ from ...utils import logging
18
+
19
+
20
+ logger = logging.get_logger(__name__)
21
+
22
+ GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
23
+ "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
24
+ "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
25
+ ),
26
+ }
27
+
28
+
29
+ class GPTSanJapaneseConfig(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of a [`GPTSanJapaneseModel`]. It is used to instantiate
32
+ a GPTSANJapanese model according to the specified arguments, defining the model architecture. Instantiating a
33
+ configuration with the defaults will yield a similar configuration to that of the GPTSANJapanese
34
+ [Tanrei/GPTSAN-japanese](https://huggingface.co/Tanrei/GPTSAN-japanese) architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+ Arguments:
40
+ vocab_size (`int`, *optional*, defaults to 36000):
41
+ Vocabulary size of the GPTSANJapanese model. Defines the number of different tokens that can be represented
42
+ by the `input_ids` passed when calling [`GPTSanJapaneseModel`].
43
+ max_position_embeddings (`int`, *optional*, defaults to 1280):
44
+ The maximum sequence length that this model might ever be used with. Defaults set this to 1280.
45
+ d_model (`int`, *optional*, defaults to 1024):
46
+ Size of the encoder layers and the pooler layer.
47
+ d_ff (`int`, *optional*, defaults to 8192):
48
+ Size of the intermediate feed forward layer in each `SwitchTransformersBlock`.
49
+ d_ext (`int`, *optional*, defaults to 4096):
50
+ Size of the intermediate feed forward layer in each Extra-layer.
51
+ d_spout (`int`, *optional*, defaults to 128):
52
+ Size of the `spout` vector.
53
+ num_switch_layers (`int`, *optional*, defaults to 10):
54
+ Number of Switch Transformer layers.
55
+ num_ext_layers (`int`, *optional*, defaults to 0):
56
+ Number of Extra-layers.
57
+ num_heads (`int`, *optional*, defaults to 16):
58
+ Number of attention heads for each attention layer in the Transformer encoder.
59
+ num_experts (`int`, *optional*, defaults to 16):
60
+ Number of experts for each SwitchTransformer layer.
61
+ expert_capacity (`int`, *optional*, defaults to 128):
62
+ Number of tokens that can be stored in each expert. If set to 1, the model will behave like a regular
63
+ Transformer.
64
+ dropout_rate (`float`, *optional*, defaults to 0.0):
65
+ The ratio for all dropout layers.
66
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
67
+ The epsilon used by the layer normalization layers.
68
+ router_bias (`bool`, *optional*, defaults to `False`):
69
+ Whether to add a bias to the router.
70
+ router_jitter_noise (`float`, *optional*, defaults to 0.0):
71
+ Amount of noise to add to the router. Set it to 0.0 during prediction or to a small value (usually 1e-2)
72
+ during training.
73
+ router_dtype (`str`, *optional*, defaults to `"float32"`):
74
+ The `dtype` used for the routers. It is preferable to keep the `dtype` to `"float32"` as specified in the
75
+ *selective precision* discussion in [the paper](https://arxiv.org/abs/2101.03961).
76
+ router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`):
77
+ Whether to ignore padding tokens when routing.
78
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
79
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
80
+ more detail.
81
+ output_attentions (`bool`, *optional*, defaults to `False`):
82
+ Whether or not to return the attentions tensors of all attention layers.
83
+ initializer_factor (`float`, *optional*, defaults to 0.002):
84
+ A factor for initializing all weight matrices.
85
+ output_router_logits (`bool`, *optional*, defaults to `False`):
86
+ Whether or not to return the router logits of all experts.
87
+ use_cache (`bool`, *optional*, defaults to `True`):
88
+ Whether or not the model should return the last key/values attentions (not used by all models)
89
+ """
90
+
91
+ model_type = "gptsan-japanese"
92
+ keys_to_ignore_at_inference = [
93
+ "past_key_values",
94
+ ]
95
+ attribute_map = {
96
+ "hidden_size": "d_model",
97
+ "num_attention_heads": "num_heads",
98
+ "num_hidden_layers": "num_layers",
99
+ }
100
+
101
+ def __init__(
102
+ self,
103
+ vocab_size=36000,
104
+ max_position_embeddings=1280,
105
+ d_model=1024,
106
+ d_ff=8192,
107
+ d_ext=4096,
108
+ d_spout=128,
109
+ num_switch_layers=10,
110
+ num_ext_layers=0,
111
+ num_heads=16,
112
+ num_experts=16,
113
+ expert_capacity=128,
114
+ dropout_rate=0.0,
115
+ layer_norm_epsilon=1e-5,
116
+ router_bias=False,
117
+ router_jitter_noise=0.0,
118
+ router_dtype="float32",
119
+ router_ignore_padding_tokens=False,
120
+ output_hidden_states=False,
121
+ output_attentions=False,
122
+ initializer_factor=0.002,
123
+ output_router_logits=False,
124
+ use_cache=True,
125
+ separator_token_id=35998,
126
+ pad_token_id=35995,
127
+ eos_token_id=35999,
128
+ **kwargs,
129
+ ):
130
+ self.vocab_size = vocab_size
131
+ self.max_position_embeddings = max_position_embeddings
132
+ self.d_model = d_model
133
+ self.d_ff = d_ff
134
+ self.d_ext = d_ext
135
+ self.d_spout = d_spout
136
+ self.num_switch_layers = num_switch_layers
137
+ self.num_ext_layers = num_ext_layers
138
+ self.num_layers = num_switch_layers + num_ext_layers
139
+ self.num_heads = num_heads
140
+ self.num_experts = num_experts
141
+ self.expert_capacity = expert_capacity
142
+ self.dropout_rate = dropout_rate
143
+ self.layer_norm_epsilon = layer_norm_epsilon
144
+ self.router_bias = router_bias
145
+ self.router_jitter_noise = router_jitter_noise
146
+ self.router_dtype = router_dtype
147
+ self.router_ignore_padding_tokens = router_ignore_padding_tokens
148
+ self.output_hidden_states = output_hidden_states
149
+ self.output_attentions = output_attentions
150
+ self.initializer_factor = initializer_factor
151
+ self.output_router_logits = output_router_logits
152
+ self.use_cache = use_cache
153
+
154
+ super().__init__(
155
+ separator_token_id=separator_token_id,
156
+ pad_token_id=pad_token_id,
157
+ eos_token_id=eos_token_id,
158
+ **kwargs,
159
+ )
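Editor's note: a brief illustrative sketch (not part of the diff) of how the configuration above composes; the values follow directly from the `__init__` defaults and `attribute_map` shown, not from any pretrained checkpoint.

from transformers.models.gptsan_japanese import GPTSanJapaneseConfig

config = GPTSanJapaneseConfig(num_switch_layers=10, num_ext_layers=2)
print(config.num_layers)           # 12 == num_switch_layers + num_ext_layers
print(config.hidden_size)          # 1024, resolved to d_model via attribute_map
print(config.num_attention_heads)  # 16, resolved to num_heads via attribute_map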
env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/convert_gptsan_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,181 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Convert GPTSANJapanese checkpoints from the original repository to a PyTorch model."""
17
+
18
+ import argparse
19
+ import json
20
+ import os
21
+ from collections import OrderedDict
22
+
23
+ import numpy as np
24
+ import tensorflow as tf
25
+ import torch
26
+
27
+
28
+ def convert_tf_gptsan_to_pt(args):
29
+ parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
30
+ params = json.loads(open(parameter_file).read())
31
+ if not params:
32
+ raise ValueError(
33
+ f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
34
+ )
35
+ if not args.output.endswith(".pt"):
36
+ args.output = args.output + ".pt"
37
+ new_state = OrderedDict()
38
+ with tf.device("/CPU:0"):
39
+ reader = tf.train.load_checkpoint(args.tf_model_dir)
40
+ shapes = reader.get_variable_to_shape_map()
41
+ for key_name in shapes.keys():
42
+ vnp = reader.get_tensor(key_name).astype(np.float16)
43
+ if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
44
+ continue
45
+ if key_name.startswith("pasts/"):
46
+ if key_name.startswith("pasts/mlp"):
47
+ player = int(key_name[9])
48
+ elif key_name.startswith("pasts/out"):
49
+ player = 8
50
+ name = "model.sqout.%d.weight" % (player * 2) # goes into an nn.Sequential interleaved with Tanh, so indices step by 2
51
+ state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
52
+ new_state[name] = torch.tensor(state)
53
+ elif key_name.startswith("model/moe"):
54
+ player = int(key_name[9:].split("/")[0])
55
+ if key_name.endswith("/switch_gating/kernel"):
56
+ name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
57
+ state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
58
+ new_state[name] = torch.tensor(state)
59
+ elif key_name.endswith("/softmlp/kernel"):
60
+ name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
61
+ state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
62
+ new_state[name] = torch.tensor(state)
63
+ elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
64
+ nlayer = key_name[-9:-7]
65
+ for i in range(16):
66
+ name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
67
+ state = (
68
+ vnp[i].transpose([1, 0]).copy()
69
+ ) # In Mesh-Tensorflow, it is one array, so it is divided
70
+ new_state[name] = torch.tensor(state)
71
+ elif key_name.startswith("model/mlp"):
72
+ player = int(key_name[9:].split("/")[0])
73
+ if key_name.endswith("/p1/kernel"):
74
+ name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
75
+ state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
76
+ new_state[name] = torch.tensor(state)
77
+ elif key_name.endswith("/p1/bias"):
78
+ name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
79
+ state = vnp.copy() # same because it is one dimensional
80
+ new_state[name] = torch.tensor(state)
81
+ elif key_name.endswith("/p2/kernel"):
82
+ name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
83
+ state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
84
+ new_state[name] = torch.tensor(state)
85
+ elif key_name.endswith("/p2/bias"):
86
+ name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
87
+ state = vnp.copy() # same because it is one dimensional
88
+ new_state[name] = torch.tensor(state)
89
+ elif key_name.startswith("model/ln"):
90
+ player = int(key_name[8:].split("/")[0])
91
+ if key_name.endswith("/b"):
92
+ name = "model.blocks.%d.feed_forward.norm.bias" % player
93
+ state = vnp.copy() # same because it is one dimensional
94
+ new_state[name] = torch.tensor(state)
95
+ elif key_name.endswith("/g"):
96
+ name = "model.blocks.%d.feed_forward.norm.weight" % player
97
+ state = vnp.copy() # same because it is one dimensional
98
+ new_state[name] = torch.tensor(state)
99
+ elif key_name.startswith("model/att"):
100
+ player = int(key_name[9:].split("/")[0])
101
+ if key_name.endswith("/qkv/kernel"):
102
+ state = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
103
+ state_q = state[:, 0, :, :]
104
+ state_k = state[:, 1, :, :]
105
+ state_v = state[:, 2, :, :]
106
+ state_q = (
107
+ state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
108
+ .transpose([1, 0])
109
+ .copy()
110
+ ) # Mesh-Tensorflow is a diagonal matrix
111
+ state_k = (
112
+ state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
113
+ .transpose([1, 0])
114
+ .copy()
115
+ ) # Mesh-Tensorflow is a diagonal matrix
116
+ state_v = (
117
+ state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
118
+ .transpose([1, 0])
119
+ .copy()
120
+ ) # Mesh-Tensorflow is a diagonal matrix
121
+ name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
122
+ new_state[name] = torch.tensor(state_q)
123
+ name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
124
+ new_state[name] = torch.tensor(state_k)
125
+ name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
126
+ new_state[name] = torch.tensor(state_v)
127
+ elif key_name.endswith("/o/kernel"):
128
+ name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
129
+ state = (
130
+ vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
131
+ ) # Mesh-Tensorflow is a diagonal matrix
132
+ new_state[name] = torch.tensor(state)
133
+ elif key_name.startswith("model/an"):
134
+ player = int(key_name[8:].split("/")[0])
135
+ if key_name.endswith("/b"):
136
+ name = "model.blocks.%d.self_attn.norm.bias" % player
137
+ state = vnp.copy() # same because it is one dimensional
138
+ new_state[name] = torch.tensor(state)
139
+ elif key_name.endswith("/g"):
140
+ name = "model.blocks.%d.self_attn.norm.weight" % player
141
+ state = vnp.copy() # same because it is one dimensional
142
+ new_state[name] = torch.tensor(state)
143
+ elif (
144
+ key_name.startswith("model/wte")
145
+ or key_name.startswith("model/wpe")
146
+ or key_name.startswith("model/ete")
147
+ ):
148
+ nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
149
+ key_name[-3:]
150
+ ]
151
+ name = "model.%s.weight" % nlayer
152
+ state = vnp.copy() # same in embedded
153
+ new_state[name] = torch.tensor(state)
154
+ if key_name.startswith("model/wte"):
155
+ name = "lm_head.weight"
156
+ state = vnp.copy() # same in embedded
157
+ new_state[name] = torch.tensor(state)
158
+ elif key_name.startswith("model/wob"):
159
+ name = "final_logits_bias"
160
+ state = vnp.copy() # same in embedded
161
+ state = state.reshape((1, -1))
162
+ new_state[name] = torch.tensor(state)
163
+ elif key_name == "model/dense/kernel":
164
+ name = "model.last_project.weight"
165
+ state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
166
+ new_state[name] = torch.tensor(state)
167
+ elif key_name == "model/dense_1/bias":
168
+ name = "model.last_project.bias"
169
+ state = vnp.copy() # same because it is one dimensional
170
+ new_state[name] = torch.tensor(state)
171
+ torch.save(new_state, args.output)
172
+
173
+
174
+ if __name__ == "__main__":
175
+ parser = argparse.ArgumentParser(
176
+ description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
177
+ )
178
+ parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
179
+ parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
180
+ args = parser.parse_args()
181
+ convert_tf_gptsan_to_pt(args)
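Editor's note: an illustrative way to drive the converter above (not part of the diff); the paths are placeholders. Equivalently, run `python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir <checkpoint_dir> --output <file>.pt` from the command line.

import argparse

from transformers.models.gptsan_japanese.convert_gptsan_tf_checkpoint_to_pytorch import convert_tf_gptsan_to_pt

args = argparse.Namespace(tf_model_dir="/path/to/tf_checkpoint", output="gptsan_japanese.pt")  # placeholder paths
convert_tf_gptsan_to_pt(args)  # expects parameters.json in tf_model_dir and saves the remapped state dict with torch.save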
env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py ADDED
@@ -0,0 +1,1345 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Toshiyuki Sakamoto(tanreinama) and HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch GPTSANJapanese model."""
16
+
17
+
18
+ import copy
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.nn as nn
23
+
24
+ from ...activations import ACT2FN
25
+ from ...modeling_outputs import MoECausalLMOutputWithPast, MoEModelOutputWithPastAndCrossAttentions
26
+ from ...modeling_utils import PreTrainedModel
27
+ from ...utils import (
28
+ DUMMY_INPUTS,
29
+ DUMMY_MASK,
30
+ add_start_docstrings,
31
+ add_start_docstrings_to_model_forward,
32
+ is_torch_fx_proxy,
33
+ logging,
34
+ )
35
+ from .configuration_gptsan_japanese import GPTSanJapaneseConfig
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+ _CONFIG_FOR_DOC = "GPTSanJapaneseConfig"
41
+ _CHECKPOINT_FOR_DOC = "Tanrei/GPTSAN-japanese"
42
+
43
+ ####################################################
44
+ # This dict contains ids and associated url
45
+ # for the pretrained weights provided with the models
46
+ ####################################################
47
+ GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = [
48
+ "Tanrei/GPTSAN-japanese",
49
+ # See all GPTSAN-japanese models at https://huggingface.co/models?filter=gptsan-japanese
50
+ ]
51
+
52
+
53
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.router_z_loss_func
54
+ def router_z_loss_func(router_logits: torch.Tensor) -> float:
55
+ r"""
56
+ Compute the router z-loss implemented in PyTorch.
57
+
58
+ The router z-loss was introduced in [Designing Effective Sparse Expert Models](https://arxiv.org/abs/2202.08906).
59
+ It encourages router logits to remain small in an effort to improve stability.
60
+
61
+ Args:
62
+ router_logits (`torch.Tensor`):
63
+ Input logits of shape [batch_size, sequence_length, num_experts]
64
+
65
+ Returns:
66
+ Scalar router z-loss.
67
+ """
68
+ num_groups, tokens_per_group, _ = router_logits.shape
69
+ log_z = torch.logsumexp(router_logits, dim=-1)
70
+ z_loss = log_z**2
71
+ return torch.sum(z_loss) / (num_groups * tokens_per_group)
72
+
73
+
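# Editor's note (not part of the diff): a quick worked example of router_z_loss_func above. With
# all-zero logits over 4 experts, logsumexp is log(4) for every token, so the loss is log(4) ** 2
# regardless of how many groups/tokens there are:
_z_loss_demo_logits = torch.zeros(2, 3, 4)               # [num_groups, tokens_per_group, num_experts]
_z_loss_demo = router_z_loss_func(_z_loss_demo_logits)   # tensor(1.9218) ~= log(4) ** 2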
74
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.load_balancing_loss_func
75
+ def load_balancing_loss_func(router_probs: torch.Tensor, expert_indices: torch.Tensor) -> float:
76
+ r"""
77
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
78
+
79
+ See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
80
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
81
+ experts is too unbalanced.
82
+
83
+ Args:
84
+ router_probs (`torch.Tensor`):
85
+ Probability assigned to each expert per token. Shape: [batch_size, sequence_length, num_experts].
86
+ expert_indices (`torch.Tensor`):
87
+ Indices tensor of shape [batch_size, sequence_length] identifying the selected expert for a given token.
88
+
89
+ Returns:
90
+ The auxiliary loss.
91
+ """
92
+ num_experts = router_probs.shape[-1]
93
+
94
+ # cast the expert indices to int64, otherwise one-hot encoding will fail
95
+ if expert_indices.dtype != torch.int64:
96
+ expert_indices = expert_indices.to(torch.int64)
97
+
98
+ if len(expert_indices.shape) == 2:
99
+ expert_indices = expert_indices.unsqueeze(2)
100
+
101
+ expert_mask = torch.nn.functional.one_hot(expert_indices, num_experts)
102
+
103
+ # For a given token, determine if it was routed to a given expert.
104
+ expert_mask = torch.max(expert_mask, axis=-2).values
105
+
106
+ # cast to float32 otherwise mean will fail
107
+ expert_mask = expert_mask.to(torch.float32)
108
+ tokens_per_group_and_expert = torch.mean(expert_mask, axis=-2)
109
+
110
+ router_prob_per_group_and_expert = torch.mean(router_probs, axis=-2)
111
+ return torch.mean(tokens_per_group_and_expert * router_prob_per_group_and_expert) * (num_experts**2)
112
+
113
+
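# Editor's note (not part of the diff): load_balancing_loss_func above evaluates to exactly 1.0 for a
# perfectly balanced router, e.g. 4 experts with uniform probabilities and one token per expert:
_balance_demo_probs = torch.full((1, 4, 4), 0.25)     # [batch_size, sequence_length, num_experts]
_balance_demo_indices = torch.tensor([[0, 1, 2, 3]])  # each token routed to a different expert
_balance_demo_loss = load_balancing_loss_func(_balance_demo_probs, _balance_demo_indices)  # tensor(1.)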
114
+ class GPTSanJapaneseDenseActDense(nn.Module):
115
+ """
116
+ FFN Layer for Switch Transformer and Extra layers
117
+
118
+ GPTSAN can mix Switch Transformer layers and normal Transformer layers. This class is used as the Expert in Switch
119
+ Transformer layers and as FFN in regular Transformer layers. RELU is used in the Switch Transformer layer, and
120
+ Swish is used in the normal Transformer layer, so there is a choice of which is used in the argument.
121
+
122
+ """
123
+
124
+ def __init__(self, config: GPTSanJapaneseConfig, ext_layer=False):
125
+ super().__init__()
126
+ d_inter = config.d_ext if ext_layer else config.d_ff
127
+ self.wi = nn.Linear(config.d_model, d_inter, bias=ext_layer)
128
+ self.wo = nn.Linear(d_inter, config.d_model, bias=ext_layer)
129
+ self.dropout = nn.Identity() if ext_layer else nn.Dropout(config.dropout_rate)
130
+ self.act = ACT2FN["swish" if ext_layer else "relu"]
131
+
132
+ def forward(self, hidden_states):
133
+ r"""
134
+ Args:
135
+ hidden_states (`torch.Tensor`) :
136
+ [num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
137
+ Returns:
138
+ torch.Tensor[num_groups, tokens_per_group, hidden_dim]
139
+
140
+ """
141
+ hidden_states = self.wi(hidden_states)
142
+ hidden_states = self.act(hidden_states)
143
+ hidden_states = self.dropout(hidden_states)
144
+ hidden_states = self.wo(hidden_states)
145
+ return hidden_states
146
+
147
+
148
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersTop1Router with SwitchTransformers->GPTSanJapanese
149
+ class GPTSanJapaneseTop1Router(nn.Module):
150
+ """
151
+ Router using tokens choose top-1 experts assignment.
152
+
153
+ This router uses the same mechanism as in Switch Transformer (https://arxiv.org/abs/2101.03961) and V-MoE
154
+ (https://arxiv.org/abs/2106.05974): tokens choose their top experts. Items are sorted by router_probs and then
155
+ routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee that each
156
+ token is processed by an expert**, or that each expert receives at least one token.
157
+
158
+ """
159
+
160
+ def __init__(self, config: GPTSanJapaneseConfig):
161
+ super().__init__()
162
+ self.num_experts = config.num_experts
163
+ self.expert_capacity = config.expert_capacity
164
+ self.classifier = nn.Linear(config.hidden_size, self.num_experts, bias=config.router_bias)
165
+ self.jitter_noise = config.router_jitter_noise
166
+ self.ignore_padding_tokens = config.router_ignore_padding_tokens
167
+ self.dtype = getattr(torch, config.router_dtype)
168
+
169
+ def _compute_router_probabilities(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
170
+ r"""
171
+ Computes router probabilities from input hidden states.
172
+
173
+ Args:
174
+ hidden_states (`torch.Tensor`):
175
+ (batch_size, sequence_length, hidden_dim) from which router probabilities are computed.
176
+ Returns:
177
+ router_probabilities (`torch.Tensor`):
178
+ Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each
179
+ token and expert. Used for routing tokens to experts.
180
+ router_logits (`torch.Tensor`):
181
+ Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits.
182
+ This is used later for computing router z-loss.
183
+ """
184
+ # float32 is used to ensure stability. See the discussion of "selective precision" in
185
+ # https://arxiv.org/abs/2101.03961.
186
+ # We also store the previous dtype to cast back the output to the previous dtype
187
+ self.input_dtype = hidden_states.dtype
188
+ hidden_states = hidden_states.to(self.dtype)
189
+
190
+ if self.training and self.jitter_noise > 0:
191
+ # Multiply the token inputs by the uniform distribution - adding some noise
192
+ hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
193
+
194
+ # Shape: [num_groups, tokens_per_group, num_experts]
195
+ self._cast_classifier()
196
+ router_logits = self.classifier(hidden_states)
197
+
198
+ # Apply Softmax and cast back to the original `dtype`
199
+ router_probabilities = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(self.input_dtype)
200
+ return router_probabilities, router_logits
201
+
202
+ def _cast_classifier(self):
203
+ r"""
204
+ `bitsandbytes` `Linear8bitLt` layers do not support manual casting. Therefore we need to check if they are an
205
+ instance of the `Linear8bitLt` class by checking special attributes.
206
+ """
207
+ if not (hasattr(self.classifier, "SCB") or hasattr(self.classifier, "CB")):
208
+ self.classifier = self.classifier.to(self.dtype)
209
+
210
+ def forward(self, hidden_states: torch.Tensor) -> Tuple:
211
+ r"""
212
+ Generic forward function for every Router class. Each Router expects to have the same input hidden states
213
+ (`hidden_states`) corresponding to the hidden states for each token, the `expert_capacity` corresponding to the
214
+ number of tokens the Router will send to each expert; some Routers can send only a few tokens to each expert.
215
+
216
+ Each Router works as follows: it expects the hidden states for each token, gets the `router_probs` and
217
+ `router_logits` from the `router_weights`. This will assign for each token, the raw probability to be assigned
218
+ to an expert. Then each Router class will have to define its own `_compute_routing_instructions`.
219
+
220
+ Args:
221
+ hidden_states (`torch.Tensor`) :
222
+ [num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
223
+ Returns:
224
+ Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`] Tuple containing the expert index, the router probs
225
+ and the router logits. The router probabilities and logits are required to compute the loss.
226
+ """
227
+ router_probs, router_logits = self._compute_router_probabilities(hidden_states)
228
+
229
+ expert_index = torch.argmax(router_probs, dim=-1)
230
+ expert_index = torch.nn.functional.one_hot(expert_index, num_classes=self.num_experts)
231
+
232
+ # Mask tokens outside expert capacity. Sum over each sequence
233
+ token_priority = torch.cumsum(expert_index, dim=-2)
234
+ # mask if the token routed to the expert will overflow
235
+ expert_capacity_mask = token_priority <= self.expert_capacity
236
+ expert_index = expert_index * expert_capacity_mask
237
+
238
+ router_probs = torch.max(router_probs, dim=-1).values.unsqueeze(-1)
239
+ return expert_index, router_probs, router_logits
240
+
241
+
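# Editor's note (not part of the diff): the cumsum-based capacity mask in forward() above, replayed on
# hand-made routing decisions. Three tokens all pick expert 0, but with an expert capacity of 2 the
# third token loses its assignment and ends up with an all-zero row, so no expert processes it:
_cap_demo_index = torch.nn.functional.one_hot(torch.tensor([[0, 0, 0]]), num_classes=2)  # [batch, seq, experts]
_cap_demo_priority = torch.cumsum(_cap_demo_index, dim=-2)        # running per-expert token count
_cap_demo_index = _cap_demo_index * (_cap_demo_priority <= 2)     # tensor([[[1, 0], [1, 0], [0, 0]]])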
242
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersSparseMLP with SwitchTransformers->GPTSanJapanese
243
+ class GPTSanJapaneseSparseMLP(nn.Module):
244
+ r"""
245
+ Implementation of the Switch Transformers Sparse MLP module.
246
+ """
247
+
248
+ def __init__(self, config: GPTSanJapaneseConfig, expert_class: nn.Module = GPTSanJapaneseDenseActDense):
249
+ super().__init__()
250
+ # Step 1: Get the correct router according to its class
251
+ self.router = GPTSanJapaneseTop1Router(config)
252
+
253
+ # Step 2: Get the experts
254
+ self.experts = nn.ModuleDict()
255
+ for idx in range(config.num_experts):
256
+ self.experts[f"expert_{idx}"] = expert_class(config)
257
+
258
+ def forward(self, hidden_states):
259
+ r"""
260
+ Hold on, this will be slightly tricky to understand. In the correct order, a MoE layer does the following:
261
+
262
+ 1- Gets the `router_mask` from the router. The shape of the mask is `(batch_size, sequence_length, num_expert)`
263
+ and corresponds to the argmax of the `router_probs`. The probabilities are needed in the computation of the
264
+ hidden states : they are broadcasted to the hidden states values (can be interpreted as a scaling factor).
265
+
266
+ 2- Dispatch the tokens to their associated experts. We do a classic for loop over the experts and assign to each
267
+ expert the corresponding hidden states.
268
+
269
+ """
270
+ # Step 1: Get the router_mask from the router as well as the probabilities
271
+ router_mask, router_probs, router_logits = self.router(hidden_states)
272
+ expert_index = torch.argmax(router_mask, dim=-1)
273
+
274
+ # The routers introduced might not always map all the tokens to an expert, which means that some hidden states
275
+ # can be unchanged from one layer to another. That is why the hidden states are cloned before updating only the selected ones.
276
+
277
+ next_states = hidden_states.clone()
278
+ for idx, expert in enumerate(self.experts.values()):
279
+ token_indices = router_mask[:, :, idx].bool()
280
+ next_states[token_indices] = expert(hidden_states[token_indices]).to(next_states.dtype)
281
+
282
+ hidden_states = router_probs * next_states
283
+ return hidden_states, (router_logits, expert_index)
284
+
285
+
286
+ class GPTSanJapaneseLayerSparseFF(nn.Module):
287
+ r"""
288
+ Switch Transformers Feed Forward layer module. This is a wrapper around the Mixture of Experts module.
289
+
290
+ Parameters:
291
+ config : ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model.
292
+ Initializing with a config file does not load the weights associated with the model, only the
293
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
294
+ """
295
+
296
+ def __init__(self, config: GPTSanJapaneseConfig):
297
+ super().__init__()
298
+ self.mlp = GPTSanJapaneseSparseMLP(config)
299
+ self.soft_bypass_mlp = nn.Linear(config.d_model, config.d_model, bias=False)
300
+ self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
301
+
302
+ def forward(self, hidden_states, output_router_logits):
303
+ r"""
304
+ Args:
305
+ hidden_states (`torch.Tensor`) :
306
+ [num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
307
+ output_router_logits (`bool`) :
308
+ Whether to also return the experts' router tuple (router logits and expert index).
309
+ Returns:
310
+ torch.Tensor[num_groups, tokens_per_group, hidden_dim]
311
+
312
+ """
313
+ forwarded_states, router_tuple = self.mlp(hidden_states)
314
+ forwarded_states += torch.tanh(self.soft_bypass_mlp(hidden_states))
315
+ output = hidden_states + self.norm(forwarded_states)
316
+
317
+ if output_router_logits and router_tuple is not None:
318
+ return output, router_tuple
319
+ else:
320
+ return output
321
+
322
+
323
+ class GPTSanJapaneseLayerDenseFF(nn.Module):
324
+ r"""
325
+ Extra Transformers Feed Forward layer module.
326
+
327
+ Parameters:
328
+ config : ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model.
329
+ Initializing with a config file does not load the weights associated with the model, only the
330
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
331
+ """
332
+
333
+ def __init__(self, config: GPTSanJapaneseConfig):
334
+ super().__init__()
335
+ # Check if it is a sparse layer, if not then it is a dense layer
336
+ self.mlp = GPTSanJapaneseDenseActDense(config, ext_layer=True)
337
+ self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
338
+
339
+ def forward(self, hidden_states):
340
+ r"""
341
+ Args:
342
+ hidden_states (`torch.Tensor`) :
343
+ [num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
344
+ Returns:
345
+ torch.Tensor[num_groups, tokens_per_group, hidden_dim]
346
+
347
+ """
348
+ forwarded_states = self.mlp(hidden_states)
349
+ output = hidden_states + self.norm(forwarded_states)
350
+ return output
351
+
352
+
353
+ # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->GPTSanJapanese
354
+ class GPTSanJapaneseAttention(nn.Module):
355
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
356
+
357
+ def __init__(
358
+ self,
359
+ embed_dim: int,
360
+ num_heads: int,
361
+ dropout: float = 0.0,
362
+ is_decoder: bool = False,
363
+ bias: bool = True,
364
+ is_causal: bool = False,
365
+ config: Optional[GPTSanJapaneseConfig] = None,
366
+ ):
367
+ super().__init__()
368
+ self.embed_dim = embed_dim
369
+ self.num_heads = num_heads
370
+ self.dropout = dropout
371
+ self.head_dim = embed_dim // num_heads
372
+ self.config = config
373
+
374
+ if (self.head_dim * num_heads) != self.embed_dim:
375
+ raise ValueError(
376
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
377
+ f" and `num_heads`: {num_heads})."
378
+ )
379
+ self.scaling = self.head_dim**-0.5
380
+ self.is_decoder = is_decoder
381
+ self.is_causal = is_causal
382
+
383
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
384
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
385
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
386
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
387
+
388
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
389
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
390
+
391
+ def forward(
392
+ self,
393
+ hidden_states: torch.Tensor,
394
+ key_value_states: Optional[torch.Tensor] = None,
395
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
396
+ attention_mask: Optional[torch.Tensor] = None,
397
+ layer_head_mask: Optional[torch.Tensor] = None,
398
+ output_attentions: bool = False,
399
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
400
+ """Input shape: Batch x Time x Channel"""
401
+
402
+ # if key_value_states are provided this layer is used as a cross-attention layer
403
+ # for the decoder
404
+ is_cross_attention = key_value_states is not None
405
+
406
+ bsz, tgt_len, _ = hidden_states.size()
407
+
408
+ # get query proj
409
+ query_states = self.q_proj(hidden_states) * self.scaling
410
+ # get key, value proj
411
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
412
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
413
+ # the provided `key_value_states` to support prefix tuning
414
+ if (
415
+ is_cross_attention
416
+ and past_key_value is not None
417
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
418
+ ):
419
+ # reuse k,v, cross_attentions
420
+ key_states = past_key_value[0]
421
+ value_states = past_key_value[1]
422
+ elif is_cross_attention:
423
+ # cross_attentions
424
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
425
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
426
+ elif past_key_value is not None:
427
+ # reuse k, v, self_attention
428
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
429
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
430
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
431
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
432
+ else:
433
+ # self_attention
434
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
435
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
436
+
437
+ if self.is_decoder:
438
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
439
+ # Further calls to cross_attention layer can then reuse all cross-attention
440
+ # key/value_states (first "if" case)
441
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
442
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
443
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
444
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
445
+ past_key_value = (key_states, value_states)
446
+
447
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
448
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
449
+ key_states = key_states.reshape(*proj_shape)
450
+ value_states = value_states.reshape(*proj_shape)
451
+
452
+ src_len = key_states.size(1)
453
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
454
+
455
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
456
+ raise ValueError(
457
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
458
+ f" {attn_weights.size()}"
459
+ )
460
+
461
+ if attention_mask is not None:
462
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
463
+ raise ValueError(
464
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
465
+ )
466
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
467
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
468
+
469
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
470
+
471
+ if layer_head_mask is not None:
472
+ if layer_head_mask.size() != (self.num_heads,):
473
+ raise ValueError(
474
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
475
+ f" {layer_head_mask.size()}"
476
+ )
477
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
478
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
479
+
480
+ if output_attentions:
481
+ # this operation is a bit awkward, but it's required to
482
+ # make sure that attn_weights keeps its gradient.
483
+ # In order to do so, attn_weights have to be reshaped
484
+ # twice and have to be reused in the following
485
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
486
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
487
+ else:
488
+ attn_weights_reshaped = None
489
+
490
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
491
+
492
+ attn_output = torch.bmm(attn_probs, value_states)
493
+
494
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
495
+ raise ValueError(
496
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
497
+ f" {attn_output.size()}"
498
+ )
499
+
500
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
501
+ attn_output = attn_output.transpose(1, 2)
502
+
503
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
504
+ # partitioned across GPUs when using tensor-parallelism.
505
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
506
+
507
+ attn_output = self.out_proj(attn_output)
508
+
509
+ return attn_output, attn_weights_reshaped, past_key_value
510
+
511
+
512
+ class GPTSanJapaneseLayerSelfAttention(nn.Module):
513
+ """
514
+ Self Attention and Normalization Unit
515
+ """
516
+
517
+ def __init__(self, config, has_relative_attention_bias=False):
518
+ super().__init__()
519
+ self.self_attn = GPTSanJapaneseAttention(
520
+ embed_dim=config.d_model,
521
+ num_heads=config.num_heads,
522
+ is_decoder=True,
523
+ bias=has_relative_attention_bias,
524
+ )
525
+ self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
526
+
527
+ def forward(
528
+ self,
529
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
530
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
531
+ attention_mask: Optional[torch.FloatTensor] = None,
532
+ head_mask: Optional[torch.FloatTensor] = None,
533
+ use_cache: Optional[bool] = False,
534
+ output_attentions: Optional[bool] = False,
535
+ ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
536
+ r"""
537
+ Self-attention and normalize block.
538
+
539
+ Args:
540
+ hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
541
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
542
+ if the model is configured as a decoder.
543
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
544
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
545
+ decoding. If `past_key_values` are used, the user can optionally input only the last
546
+ `decoder_input_ids` (those that don't have their past key value states given to this model) of shape
547
+ `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
548
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
549
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
550
+ in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
551
+
552
+ - 1 for tokens that are **not masked**,
553
+ - 0 for tokens that are **masked**.
554
+
555
+ head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
556
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
557
+
558
+ - 1 indicates the head is **not masked**,
559
+ - 0 indicates the head is **masked**.
560
+
561
+ use_cache (`bool`, *optional*):
562
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
563
+ (see `past_key_values`).
564
+ output_attentions (`bool`, *optional*):
565
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
566
+ returned tensors for more detail.
567
+ Returns:
568
+ Tuple[torch.Tensor[num_groups, tokens_per_group, hidden_dim],...]
569
+ """
570
+ # Self Attention
571
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
572
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
573
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
574
+ atten_out = self.self_attn(
575
+ hidden_states=hidden_states,
576
+ past_key_value=self_attn_past_key_value,
577
+ attention_mask=(1 - attention_mask) * torch.finfo(hidden_states.dtype).min,
578
+ layer_head_mask=head_mask,
579
+ output_attentions=output_attentions,
580
+ )
581
+ if output_attentions:
582
+ attn_weights = (atten_out[1],)
583
+ else:
584
+ attn_weights = ()
585
+
586
+ attention_output = atten_out[0]
587
+
588
+ hidden = hidden_states + self.norm(attention_output)
589
+
590
+ if use_cache:
591
+ outputs = (hidden, atten_out[2]) # hidden, present, (attentions)
592
+ else:
593
+ outputs = (hidden,) # hidden, (attentions)
594
+
595
+ return outputs + attn_weights
596
+
597
+
598
+ class GPTSanJapaneseBlock(nn.Module):
599
+ """
600
+ Self Attention and FFN Unit
601
+ """
602
+
603
+ def __init__(self, config, ext_layer=False):
604
+ super().__init__()
605
+ self.self_attn = GPTSanJapaneseLayerSelfAttention(config)
606
+ self.feed_forward = GPTSanJapaneseLayerDenseFF(config) if ext_layer else GPTSanJapaneseLayerSparseFF(config)
607
+
608
+ def forward(
609
+ self,
610
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
611
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
612
+ attention_mask: Optional[torch.FloatTensor] = None,
613
+ head_mask: Optional[torch.FloatTensor] = None,
614
+ use_cache: Optional[bool] = False,
615
+ output_attentions: Optional[bool] = False,
616
+ output_router_tuple: Optional[bool] = False,
617
+ ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
618
+ r"""
619
+ GPTSAN transformer block.
620
+
621
+ Args:
622
+ hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
623
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
624
+ if the model is configured as a decoder.
625
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
626
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
627
+ decoding. If `past_key_values` are used, the user can optionally input only the last
628
+ `decoder_input_ids` (those that don't have their past key value states given to this model) of shape
629
+ `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
630
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
631
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
632
+ in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
633
+
634
+ - 1 for tokens that are **not masked**,
635
+ - 0 for tokens that are **masked**.
636
+
637
+ head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
638
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
639
+
640
+ - 1 indicates the head is **not masked**,
641
+ - 0 indicates the head is **masked**.
642
+
643
+ use_cache (`bool`, *optional*):
644
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
645
+ (see `past_key_values`).
646
+ output_attentions (`bool`) :
647
+ Whether to output attention probabilities.
648
+ output_router_tuple:
649
+ Whether to output the experts' router logits and expert ids.
650
+ Returns:
651
+ Tuple[torch.Tensor[num_groups, tokens_per_group, hidden_dim],...]
652
+ """
653
+ atten_out = self.self_attn(
654
+ hidden_states=hidden_states,
655
+ past_key_value=past_key_value,
656
+ attention_mask=attention_mask,
657
+ head_mask=head_mask,
658
+ use_cache=use_cache,
659
+ output_attentions=output_attentions,
660
+ )
661
+ attention_output = atten_out[0]
662
+
663
+ if isinstance(self.feed_forward, GPTSanJapaneseLayerSparseFF):
664
+ sparse_out = self.feed_forward(attention_output, output_router_tuple)
665
+ if output_router_tuple:
666
+ hidden, router_tuple = sparse_out
667
+ else:
668
+ hidden = sparse_out
669
+ else:
670
+ hidden = self.feed_forward(attention_output)
671
+
672
+ outputs = (hidden,) + atten_out[1:]
673
+
674
+ if isinstance(self.feed_forward, GPTSanJapaneseLayerSparseFF) and output_router_tuple:
675
+ outputs += (router_tuple,)
676
+
677
+ return outputs
678
+
679
+
680
+ class GPTSanJapanesePreTrainedModel(PreTrainedModel):
681
+ """
682
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
683
+ models.
684
+ """
685
+
686
+ config_class = GPTSanJapaneseConfig
687
+ base_model_prefix = "gptsan_japanese"
688
+ supports_gradient_checkpointing = False
689
+ _no_split_modules = ["GPTSanJapaneseBlock"]
690
+ _skip_keys_device_placement = "past_key_values"
691
+
692
+ @property
693
+ def dummy_inputs(self):
694
+ input_ids = torch.tensor(DUMMY_INPUTS)
695
+ input_mask = torch.tensor(DUMMY_MASK)
696
+ dummy_inputs = {
697
+ "input_ids": input_ids,
698
+ "attention_mask": input_mask,
699
+ }
700
+ return dummy_inputs
701
+
702
+ def _init_weights(self, module):
703
+ """Initialize the weights"""
704
+ factor = self.config.initializer_factor # Used for testing weights initialization
705
+ if isinstance(module, nn.LayerNorm):
706
+ module.weight.data.fill_(factor * 1.0)
707
+ module.bias.data.zero_()
708
+ elif isinstance(module, nn.Linear):
709
+ module.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
710
+ if hasattr(module, "bias") and module.bias is not None:
711
+ module.bias.data.zero_()
712
+ elif isinstance(module, nn.Embedding):
713
+ module.weight.data.normal_(mean=0.0, std=factor * 1.0)
714
+ elif isinstance(module, GPTSanJapaneseModel):
715
+ # Mesh TensorFlow embeddings initialization
716
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
717
+ module.embed_tokens.weight.data.normal_(mean=0.0, std=factor * 1.0)
718
+ module.position_embeddings.weight.data.normal_(mean=0.0, std=factor * 1.0)
719
+ if hasattr(module, "extra_position_embeddings") and module.extra_position_embeddings is not None:
720
+ module.extra_position_embeddings.weight.data.normal_(mean=0.0, std=factor * 1.0)
721
+ elif isinstance(module, (GPTSanJapaneseModel, GPTSanJapaneseForConditionalGeneration)):
722
+ # Mesh TensorFlow embeddings initialization
723
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
724
+ module.final_logits_bias.data.normal_(mean=0.0, std=factor * 1.0)
725
+ if hasattr(module, "lm_head") and not self.config.tie_word_embeddings:
726
+ module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0)
727
+ elif isinstance(module, GPTSanJapaneseDenseActDense):
728
+ # Mesh TensorFlow FF initialization
729
+ # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
730
+ # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
731
+ module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
732
+ if hasattr(module.wi, "bias") and module.wi.bias is not None:
733
+ module.wi.bias.data.zero_()
734
+ module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
735
+ if hasattr(module.wo, "bias") and module.wo.bias is not None:
736
+ module.wo.bias.data.zero_()
737
+ elif isinstance(module, GPTSanJapaneseAttention):
738
+ # Multi-headed attention
739
+ d_model = self.config.d_model
740
+ key_value_proj_dim = self.config.d_model
741
+ n_heads = self.config.num_heads
742
+ module.k_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
743
+ module.v_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
744
+ module.q_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
745
+ module.out_proj.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
746
+ elif isinstance(module, GPTSanJapaneseSparseMLP):
747
+ # Mesh TensorFlow attention initialization to avoid scaling before softmax
748
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
749
+ d_model = self.config.d_model
750
+ key_value_proj_dim = self.config.d_model
751
+ n_heads = self.config.num_heads
752
+ module.router.classifier.weight.data.normal_(mean=0.0, std=factor * 1)
753
+ for idx in range(self.config.num_experts):
754
+ module.experts[f"expert_{idx}"].wi.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
755
+ module.experts[f"expert_{idx}"].wo.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
756
+
757
+ # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right
758
+ def _shift_right(self, input_ids):
759
+ decoder_start_token_id = self.config.decoder_start_token_id
760
+ pad_token_id = self.config.pad_token_id
761
+
762
+ if decoder_start_token_id is None:
763
+ raise ValueError(
764
+ "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. "
765
+ "See T5 docs for more information."
766
+ )
767
+
768
+ # shift inputs to the right
769
+ if is_torch_fx_proxy(input_ids):
770
+ # Item assignment is not supported natively for proxies.
771
+ shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
772
+ shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
773
+ else:
774
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
775
+ shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
776
+ shifted_input_ids[..., 0] = decoder_start_token_id
777
+
778
+ if pad_token_id is None:
779
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
780
+ # replace possible -100 values in labels by `pad_token_id`
781
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
782
+
783
+ return shifted_input_ids
784
+
785
+
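# Editor's note (not part of the diff): the effect of the T5-style _shift_right above on a tiny label
# tensor. The ids are illustrative only: 35995 matches the default pad_token_id, while
# decoder_start_token_id (35998 here) is not set by GPTSanJapaneseConfig and must come from the config.
_shift_demo_labels = torch.tensor([[10, -100, 20]])
_shift_demo = _shift_demo_labels.new_zeros(_shift_demo_labels.shape)
_shift_demo[..., 1:] = _shift_demo_labels[..., :-1].clone()
_shift_demo[..., 0] = 35998                              # hypothetical decoder_start_token_id
_shift_demo.masked_fill_(_shift_demo == -100, 35995)     # -100 labels become pad_token_id
# _shift_demo is now tensor([[35998, 10, 35995]])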
786
+ GPTSAN_JAPANESE_START_DOCSTRING = r"""
787
+
788
+ The [GPTSAN-japanese](https://github.com/tanreinama/GPTSAN) model was proposed in the General-purpose Switch transformer
789
+ based Japanese language model.
790
+
791
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
792
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
793
+ and behavior.
794
+
795
+ Parameters:
796
+ config ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model.
797
+ Initializing with a config file does not load the weights associated with the model, only the
798
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
799
+ """
800
+
801
+ GPTSAN_JAPANESE_INPUTS_DOCSTRING = r"""
802
+ Args:
803
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
804
+ Indices of input sequence tokens in the vocabulary. GPTSAN-japanese is a model that generates sentence
805
+ continuations or predicts tokens at mask positions. Special tokens required for inputs to the model are
806
+ automatically appended.
807
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
808
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
809
+
810
+ - 1 for tokens that are **not masked**,
811
+ - 0 for tokens that are **masked**.
812
+
813
+ [What are attention masks?](../glossary#attention-mask)
814
+ token_type_ids (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
815
+ An input that masks the Prefix part in the Prefix-LM input. Mask values selected in `[0, 1]`:
816
+
817
+ - 1 for tokens that are **prefix** input,
818
+ - 0 for tokens that are **not-prefix** input.
819
+ spout (`torch.Tensor` of shape `(batch_size, config.d_spout)`):
820
+ This vector is transformed through an 8-layer FFN and can be used instead of `past_key_values`.
821
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
822
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
823
+
824
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
825
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
826
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
827
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
828
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
829
+ use_cache (`bool`, *optional*):
830
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
831
+ `past_key_values`).
832
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
833
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
834
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
835
+ model's internal embedding lookup matrix.
836
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
837
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
838
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
839
+ input (see `past_key_values`). This is useful if you want more control over how to convert
840
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
841
+ output_attentions (`bool`, *optional*):
842
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
843
+ tensors for more detail.
844
+ output_hidden_states (`bool`, *optional*):
845
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
846
+ more detail.
847
+ return_dict (`bool`, *optional*):
848
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
849
+ router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`):
850
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
851
+ Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models.
852
+ """
853
+
854
+
855
+ @add_start_docstrings(
856
+ "The bare GPTSAN-japanese Model transformer outputting raw hidden-states without any specific head on top.",
857
+ GPTSAN_JAPANESE_START_DOCSTRING,
858
+ )
859
+ class GPTSanJapaneseModel(GPTSanJapanesePreTrainedModel):
860
+ def __init__(self, config: GPTSanJapaneseConfig):
861
+ super().__init__(config)
862
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model)
863
+ self.config = copy.deepcopy(config)
864
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model)
865
+ self.last_project = nn.Linear(config.d_model, config.d_model, bias=True)
866
+ self.act = ACT2FN["swish"]
867
+
868
+ self.blocks = torch.nn.ModuleList([])
869
+ for _ in range(config.num_switch_layers):
870
+ self.blocks.append(GPTSanJapaneseBlock(config))
871
+ for _ in range(config.num_ext_layers):
872
+ self.blocks.append(GPTSanJapaneseBlock(config, ext_layer=True))
873
+
874
+ if config.num_ext_layers > 0:
875
+ self.extra_position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model)
876
+
877
+ if config.d_spout:
878
+ spouts = []
879
+ for _ in range(8):
880
+ spouts.append(nn.Linear(config.d_spout, config.d_spout, bias=False))
881
+ spouts.append(nn.Tanh())
882
+ spouts.append(nn.Linear(config.d_spout, config.num_layers * 2 * config.d_model, bias=False))
883
+ self.spout = nn.Sequential(*spouts)
884
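+ # The `spout` network above passes a (batch_size, d_spout) vector through eight Linear+Tanh
+ # layers and a final projection to num_layers * 2 * d_model values; forward() reshapes this
+ # into one pseudo (key, value) pair per layer, used in place of `past_key_values`.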
+
885
+ self.post_init()
886
+
887
+ def get_input_embeddings(self):
888
+ return self.embed_tokens
889
+
890
+ def set_input_embeddings(self, new_embeddings):
891
+ self.embed_tokens = new_embeddings
892
+
893
+ @add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING)
894
+ def forward(
895
+ self,
896
+ input_ids: Optional[torch.LongTensor] = None,
897
+ attention_mask: Optional[torch.FloatTensor] = None,
898
+ token_type_ids: Optional[torch.FloatTensor] = None,
899
+ spout: Optional[torch.FloatTensor] = None,
900
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
901
+ head_mask: Optional[torch.FloatTensor] = None,
902
+ use_cache: Optional[bool] = False,
903
+ inputs_embeds: Optional[torch.FloatTensor] = None,
904
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
905
+ output_attentions: Optional[bool] = None,
906
+ output_hidden_states: Optional[bool] = None,
907
+ return_dict: Optional[bool] = None,
908
+ output_router_logits: Optional[bool] = None,
909
+ num_precontext: Optional[torch.LongTensor] = None,
910
+ ) -> Union[MoEModelOutputWithPastAndCrossAttentions, Tuple[torch.FloatTensor]]:
911
+ r"""
912
+ num_precontext (`torch.LongTensor` of shape `(batch_size,1)`):
913
+ length of `hybrid` input tokens in the input. Tokens up to this length attend to both the preceding and following context, like
914
+ BERT, while tokens after that attend only to the preceding context, like GPT. See also:
915
+ https://github.com/tanreinama/GPTSAN/blob/main/report/model.md
916
+
917
+ Returns:
918
+ `MoEModelOutputWithPastAndCrossAttentions` if `return_dict=True`, otherwise a plain `tuple`.
920
+ """
921
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
922
+ device = self.position_embeddings.weight.device
923
+ if input_ids is None:
924
+ input_ids = torch.zeros([1, 1]).int().to(device)  # dummy input_ids used when None is given
925
+ num_pasts_contexts = 0
926
+ num_batch = input_ids.shape[0]
927
+ pasts_or_spout_value = None
928
+ if past_key_values is not None:
929
+ num_pasts_contexts = past_key_values[0][0].shape[2]
930
+ elif self.config.d_spout and spout is not None:
931
+ # `spout` is a special input vector specific to GPTSAN
932
+ # This controls the output by projecting embedded information such as the class of sentences during learning.
933
+ # It should be passed instead of the first past_key_value.
934
+ # See the original GPTSAN repository for details
935
+ num_pasts_contexts += 1
936
+
937
+ # If there is an attention_mask, prepend one extra position for the spout
938
+ if self.config.d_spout and spout is not None and attention_mask is not None:
939
+ attention_mask_with_spout = torch.ones(num_batch, attention_mask.shape[1] + 1, device=device)
940
+ attention_mask_with_spout[:, 1:] -= 1 - attention_mask # 1st token should be spout
941
+ attention_mask = attention_mask_with_spout # update attention_mask
942
+
943
+ if num_precontext is not None:
944
+ # `num_precontext` is the number of tokens that refer to each other in prefix-lm
945
+ # created per batch, so dimension of num_precontext should be [batch, 1]
946
+ if not (
947
+ len(num_precontext.shape) == 2 and num_precontext.shape[1] == 1
948
+ ): # num_precontext Should be [batch,1]
949
+ raise ValueError("num_precontext should be [batch, 1] size.")
950
+ num_precontext = torch.reshape(num_precontext, [-1])
951
+ else:
952
+ num_precontext = torch.zeros([num_batch]).int().to(device)
953
+
954
+ num_input_contexts = input_ids.shape[1]
955
+ num_output_contexts = num_input_contexts + num_pasts_contexts
956
+
957
+ hidden_states = self.embed_tokens(input_ids)
958
+
959
+ if past_key_values is not None:
960
+ pasts_or_spout_value = past_key_values
961
+ elif self.config.d_spout and spout is not None:
962
+ # Make vector from `spout` of GPTSAN to the same shape as past_key_values
963
+ pasts_or_spout_value = self.spout(spout) # projecting `spout` vector
964
+ pasts_or_spout_value = torch.reshape(
965
+ pasts_or_spout_value,
966
+ [
967
+ num_batch,
968
+ self.config.num_layers,
969
+ 2,
970
+ self.config.num_heads,
971
+ num_pasts_contexts,
972
+ self.config.d_model // self.config.num_heads,
973
+ ],
974
+ )
975
+ pasts_or_spout_value = torch.split(pasts_or_spout_value, [1] * self.config.num_layers, dim=1)
976
+ # make same shape as past_key_values
977
+ pasts_or_spout_value = tuple(
978
+ tuple([b.squeeze(1) for b in torch.split(a.squeeze(1), [1, 1], dim=1)]) for a in pasts_or_spout_value
979
+ )
980
+ else:
981
+ pasts_or_spout_value = [None] * self.config.num_layers
982
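+ # Shape trace for the spout branch above: (batch, d_spout) -> (batch, num_layers * 2 * d_model)
+ # -> (batch, num_layers, 2, num_heads, 1, d_model // num_heads) -> a tuple of num_layers
+ # (key, value) pairs, each of shape (batch, num_heads, 1, d_model // num_heads).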
+
983
+ # Token position considering spout and pasts
984
+ token_position = torch.arange(num_input_contexts).to(device) + num_pasts_contexts
985
+
986
+ if attention_mask is None:
987
+ attention_mask = torch.ones(num_batch, num_input_contexts, device=device)
988
+
989
+ # positions for get position_embeddings
990
+ gather_position = (
991
+ (
992
+ torch.zeros((num_batch, self.config.d_model, num_input_contexts)).to(device)
993
+ + token_position.unsqueeze(0)
994
+ )
995
+ .transpose(1, 2)
996
+ .long()
997
+ )
998
+ # When padding with padding_side="left", zeros line up on the left side of attention_mask, so position_embeddings is shifted accordingly
999
+ gather_position -= (1 - attention_mask).argmin(dim=-1).unsqueeze(1).unsqueeze(2)
1000
+ gather_position = torch.clip(gather_position, num_pasts_contexts, self.config.max_position_embeddings - 1)
1001
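+ # e.g. with num_pasts_contexts=0 and attention_mask=[0, 0, 1, 1] (two left-pads), the raw
+ # positions [0, 1, 2, 3] become [-2, -1, 0, 1] and are clipped to [0, 0, 0, 1], so the first
+ # real token still receives position embedding 0.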
+
1002
+ # attention_mask is applied per batch
1003
+ for i in range(num_batch):
1004
+ hidden_states[i] += torch.gather(self.position_embeddings.weight, dim=0, index=gather_position[i])
1005
+
1006
+ # Create a mask to be used when making the prefix Input length of Prefix-LM variable
1007
+ causal_mask = (
1008
+ torch.tril(torch.ones((num_output_contexts, num_output_contexts), dtype=torch.uint8))
1009
+ .view(1, 1, num_output_contexts, num_output_contexts)
1010
+ .to(device)
1011
+ )
1012
+ prefix_lm_mask = causal_mask[:, :, -num_input_contexts:, :]
1013
+ if token_type_ids is not None:
1014
+ token_type_ids = token_type_ids.unsqueeze(1).unsqueeze(2)
1015
+ prefix_lm_mask = ((prefix_lm_mask + token_type_ids) > 0).float()
1016
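+ # e.g. for 3 tokens with token_type_ids=[1, 1, 0], the causal mask
+ # [[1,0,0],[1,1,0],[1,1,1]] becomes [[1,1,0],[1,1,0],[1,1,1]]: the two prefix tokens
+ # attend to each other bidirectionally while the rest stays causal.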
+ # Merge prefix_lm_mask and attention_mask
1017
+ extended_attention_mask = prefix_lm_mask * attention_mask.unsqueeze(1).unsqueeze(2)
1018
+
1019
+ # Prepare head mask if needed
1020
+ if head_mask is not None:
1021
+ head_mask = self.get_head_mask(
1022
+ head_mask, self.config.num_switch_layers + self.config.num_ext_layers
1023
+ ) # n_layer x batch x n_heads x N x N
1024
+
1025
+ # outputs
1026
+ present_key_value_states = () if self.config.use_cache or use_cache else None
1027
+ all_hidden_states = () if self.config.output_hidden_states or output_hidden_states else None
1028
+ all_attentions = () if self.config.output_attentions or output_attentions else None
1029
+ all_router_probs = () if self.config.output_router_logits or output_router_logits else None
1030
+
1031
+ for layer, past in enumerate(pasts_or_spout_value):
1032
+ if layer == self.config.num_switch_layers:
1033
+ if self.config.num_ext_layers > 0:
1034
+ # extra_position_embeddings are extra position embeddings that are only created when extending the model with code from the original GPTSAN repository. Not used in the default model.
1035
+ # However, it is created when you create an additional layer and partially train only that location.
1036
+ # Therefore, convert_gptsan_tf_checkpoint_to_pytorch.py is used when converting and loading models created in the original GPTSAN repository.
1037
+ for i in range(num_batch):
1038
+ hidden_states[i] += torch.gather(
1039
+ self.extra_position_embeddings.weight, dim=0, index=gather_position[i]
1040
+ )
1041
+
1042
+ output_router_tuple = (
1043
+ self.config.output_router_logits or output_router_logits
1044
+ ) and layer < self.config.num_switch_layers
1045
+ block_output = self.blocks[layer](
1046
+ hidden_states=hidden_states,
1047
+ past_key_value=past,
1048
+ attention_mask=extended_attention_mask,
1049
+ head_mask=head_mask,
1050
+ use_cache=self.config.use_cache or use_cache,
1051
+ output_attentions=self.config.output_attentions or output_attentions,
1052
+ output_router_tuple=output_router_tuple,
1053
+ )
1054
+
1055
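+ # Each block returns (hidden_states[, present_key_value][, attentions][, router_tuple])
+ # depending on the flags passed above, so the outputs are unpacked with a moving index.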
+ outpos = 0
1056
+ hidden_states = block_output[outpos]
1057
+ if self.config.output_hidden_states or output_hidden_states:
1058
+ all_hidden_states += (hidden_states,)
1059
+ if self.config.use_cache or use_cache:
1060
+ outpos += 1
1061
+ present = block_output[outpos]
1062
+ present_key_value_states += (present,)
1063
+ if self.config.output_attentions or output_attentions:
1064
+ outpos += 1
1065
+ attention_probs = block_output[outpos]
1066
+ all_attentions += (attention_probs,)
1067
+ if output_router_tuple:
1068
+ outpos += 1
1069
+ router_tuple = block_output[outpos]
1070
+ all_router_probs += (router_tuple[0],)
1071
+
1072
+ hidden_states = self.last_project(hidden_states)
1073
+ hidden_states = self.act(hidden_states)
1074
+
1075
+ if self.config.output_hidden_states or output_hidden_states:
1076
+ all_hidden_states = all_hidden_states + (hidden_states,)
1077
+
1078
+ if not return_dict:
1079
+ return tuple(
1080
+ v
1081
+ for v in [
1082
+ hidden_states,
1083
+ present_key_value_states,
1084
+ all_hidden_states,
1085
+ all_attentions,
1086
+ all_router_probs,
1087
+ ]
1088
+ if v is not None
1089
+ )
1090
+
1091
+ return MoEModelOutputWithPastAndCrossAttentions(
1092
+ last_hidden_state=hidden_states,
1093
+ past_key_values=present_key_value_states,
1094
+ hidden_states=all_hidden_states,
1095
+ attentions=all_attentions,
1096
+ router_probs=all_router_probs,
1097
+ )
1098
+
1099
+
1100
+ @add_start_docstrings(
1101
+ "The bare GPTSAN-japanese Model with a language modeling head.",
1102
+ GPTSAN_JAPANESE_START_DOCSTRING,
1103
+ )
1104
+ class GPTSanJapaneseForConditionalGeneration(GPTSanJapanesePreTrainedModel):
1105
+ _tied_weights_keys = ["lm_head.weight"]
1106
+
1107
+ def __init__(self, config: GPTSanJapaneseConfig):
1108
+ super().__init__(config)
1109
+ self.model = GPTSanJapaneseModel(config)
1110
+ self.register_buffer("final_logits_bias", torch.zeros([1, config.vocab_size]))
1111
+ self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
1112
+ if not self.config.torchscript:
1113
+ self.lm_head.weight = self.model.embed_tokens.weight
1114
+
1115
+ @add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING)
1116
+ def forward(
1117
+ self,
1118
+ input_ids: Optional[torch.LongTensor] = None,
1119
+ attention_mask: Optional[torch.FloatTensor] = None,
1120
+ token_type_ids: Optional[torch.FloatTensor] = None,
1121
+ spout: Optional[torch.FloatTensor] = None,
1122
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1123
+ head_mask: Optional[torch.FloatTensor] = None,
1124
+ use_cache: Optional[bool] = False,
1125
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1126
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1127
+ output_attentions: Optional[bool] = None,
1128
+ output_hidden_states: Optional[bool] = None,
1129
+ return_dict: Optional[bool] = None,
1130
+ output_router_logits: Optional[bool] = None,
1131
+ labels: Optional[torch.LongTensor] = None,
1132
+ ) -> Union[Tuple[torch.FloatTensor], MoECausalLMOutputWithPast]:
1133
+ r"""
1134
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1135
+ Labels for computing the language modeling loss. Indices should be in `[-100, 0, ...,
1136
+ config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for
1137
+ labels in `[0, ..., config.vocab_size - 1]`
1138
+
1139
+ Returns:
1140
+ `MoECausalLMOutputWithPast` if `return_dict=True`, otherwise a plain `tuple`.
1141
+
1142
+ Example:
1143
+
1144
+ Text Generation with regular LM Model
1145
+ ```python
1146
+ >>> from transformers import AutoModel, AutoTokenizer, trainer_utils
1147
+
1148
+ >>> device = "cuda"
1149
+ >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
1150
+ >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
1151
+ >>> x_token = tokenizer("織田信長は、", return_tensors="pt")
1152
+ >>> trainer_utils.set_seed(30)
1153
+ >>> input_ids = x_token.input_ids.to(device)
1154
+ >>> gen_token = model.generate(input_ids, max_new_tokens=50)
1155
+ >>> tokenizer.decode(gen_token[0])
1156
+ "織田信長は、政治・軍事の中枢まで掌握した政治家であり、日本史上類を見ない驚異的な軍事侵攻を続け..."
1157
+ ```
1158
+
1159
+ Text Generation with Prefix-LM Model
1160
+ ```python
1161
+ >>> from transformers import AutoModel, AutoTokenizer, trainer_utils
1162
+
1163
+ >>> device = "cuda"
1164
+ >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
1165
+ >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
1166
+ >>> x_token = tokenizer("", prefix_text="織田信長は、", return_tensors="pt")
1167
+ >>> trainer_utils.set_seed(30)
1168
+ >>> input_ids = x_token.input_ids.to(device)
1169
+ >>> token_type_ids = x_token.token_type_ids.to(device)
1170
+ >>> gen_token = model.generate(input_ids, token_type_ids=token_type_ids, max_new_tokens=50)
1171
+ >>> tokenizer.decode(gen_token[0])
1172
+ "織田信長は、政治・外交で数々の戦果を上げるが、1568年からは、いわゆる本能寺の変で細川晴元に暗殺される..."
1173
+ ```
1174
+
1175
+ Simultaneously Text Generation And Masked Language Model
1176
+ ```python
1177
+ >>> from transformers import AutoModel, AutoTokenizer, trainer_utils
1178
+
1179
+ >>> device = "cuda"
1180
+ >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
1181
+ >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
1182
+ >>> masked_sentence = "武田信玄は、<|inputmask|>時代ファンならぜひ押さえ<|inputmask|>きたい名将の一人。"
1183
+ >>> x_token = tokenizer("", prefix_text=masked_sentence, return_tensors="pt")
1184
+ >>> trainer_utils.set_seed(30)
1185
+ >>> input_ids = x_token.input_ids.to(device)
1186
+ >>> token_type_ids = x_token.token_type_ids.to(device)
1187
+ >>> out_lm_token = model.generate(input_ids, token_type_ids=token_type_ids, max_new_tokens=50)
1188
+ >>> out_mlm_token = model(input_ids, token_type_ids=token_type_ids).logits.argmax(axis=-1)
1189
+ >>> tokenizer.decode(out_mlm_token[0])
1190
+ "武田信玄は、戦国時代ファンならぜひ押さえておきたい名将の一人。"
1191
+
1192
+ >>> tokenizer.decode(out_lm_token[0][input_ids.shape[1] :])
1193
+ "武田氏の三代に渡った武田家のひとり\n甲斐市に住む、日本史上最大の戦国大名。..."
1194
+ ```"""
1195
+ SEG_TOKEN = self.config.separator_token_id
1196
+ use_cache = use_cache or self.config.use_cache
1197
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1198
+ model_return_dict = True
1199
+ num_precontext = None
1200
+ if input_ids is not None:
1201
+ num_batch = input_ids.shape[0]
1202
+ num_precontext = torch.zeros([num_batch]).int().to(input_ids.device)
1203
+ where_separators = torch.where(input_ids == SEG_TOKEN)
1204
+ num_precontext[where_separators[0]] += where_separators[1]
1205
+ num_precontext = num_precontext.unsqueeze(1)
1206
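+ # e.g. input_ids [[SOT, w1, w2, SEG, w3]] puts the separator at index 3, so num_precontext
+ # becomes [[3]]: the three tokens before the separator form the bidirectional prefix.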
+
1207
+ outputs = self.model(
1208
+ input_ids,
1209
+ attention_mask,
1210
+ token_type_ids,
1211
+ spout,
1212
+ past_key_values,
1213
+ head_mask,
1214
+ use_cache,
1215
+ inputs_embeds,
1216
+ decoder_inputs_embeds,
1217
+ output_attentions,
1218
+ output_hidden_states,
1219
+ model_return_dict,
1220
+ output_router_logits,
1221
+ num_precontext,
1222
+ )
1223
+
1224
+ lm_logits = self.lm_head(outputs[0])
1225
+ if lm_logits.shape[-1] == self.final_logits_bias.shape[-1]:
1226
+ lm_logits = lm_logits + self.final_logits_bias
1227
+
1228
+ loss = None
1229
+ z_loss = None
1230
+ router_probs = None
1231
+ aux_loss = None
1232
+ if labels is not None:
1233
+ # move labels to correct device to enable model parallelism
1234
+ labels = labels.to(lm_logits.device)
1235
+
1236
+ loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
1237
+
1238
+ if output_router_logits:
1239
+ # Compute the router loss (z_loss + auxiliary loss) for each router in the encoder and decoder
1240
+ router_logits, expert_indexes = self._unpack_router_logits(outputs.router_probs)
1241
+ z_loss = router_z_loss_func(router_logits)
1242
+ router_probs = nn.Softmax(dim=-1)(router_logits)
1243
+ aux_loss = load_balancing_loss_func(router_probs, expert_indexes)
1244
+
1245
+ loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
1246
+
1247
+ if not return_dict:
1248
+ return tuple(
1249
+ v
1250
+ for v in [
1251
+ loss,
1252
+ lm_logits,
1253
+ outputs.past_key_values,
1254
+ outputs.hidden_states,
1255
+ outputs.router_probs,
1256
+ z_loss,
1257
+ aux_loss,
1258
+ ]
1259
+ if v is not None
1260
+ )
1261
+
1262
+ return MoECausalLMOutputWithPast(
1263
+ loss=loss,
1264
+ logits=lm_logits,
1265
+ past_key_values=outputs.past_key_values,
1266
+ hidden_states=outputs.hidden_states,
1267
+ attentions=outputs.attentions,
1268
+ router_logits=outputs.router_probs,
1269
+ z_loss=z_loss,
1270
+ aux_loss=aux_loss,
1271
+ )
1272
+
1273
+ def prepare_inputs_for_generation(
1274
+ self,
1275
+ input_ids: torch.LongTensor,
1276
+ attention_mask: torch.FloatTensor,
1277
+ token_type_ids: Optional[torch.FloatTensor] = None,
1278
+ spout: Optional[Union[List, torch.FloatTensor]] = None,
1279
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1280
+ **kwargs,
1281
+ ):
1282
+ if isinstance(spout, list):
1283
+ spout = torch.tensor(spout).float()
1284
+ if input_ids is not None:
1285
+ spout = spout.to(input_ids.device)
1286
+ if past_key_values is not None:
1287
+ return {
1288
+ "input_ids": input_ids[:, -1:] if input_ids is not None else None,
1289
+ "attention_mask": attention_mask,
1290
+ "token_type_ids": token_type_ids[:, -1:] if token_type_ids is not None else None,
1291
+ "spout": spout,
1292
+ "past_key_values": past_key_values,
1293
+ }
1294
+ return {
1295
+ "input_ids": input_ids,
1296
+ "attention_mask": attention_mask,
1297
+ "token_type_ids": token_type_ids,
1298
+ "spout": spout,
1299
+ "past_key_values": None,
1300
+ }
1301
+
1302
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.prepare_decoder_input_ids_from_labels with SwitchTransformers->GPTSanJapanese
1303
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
1304
+ return self._shift_right(labels)
1305
+
1306
+ # Copied from transformers.models.mbart.modeling_mbart.MBartForConditionalGeneration.resize_token_embeddings with MBart->GPTSanJapanese
1307
+ def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
1308
+ new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
1309
+ self._resize_final_logits_bias(new_embeddings.weight.shape[0])
1310
+ return new_embeddings
1311
+
1312
+ # Copied from transformers.models.mbart.modeling_mbart.MBartForConditionalGeneration._resize_final_logits_bias with MBart->GPTSanJapanese
1313
+ def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
1314
+ old_num_tokens = self.final_logits_bias.shape[-1]
1315
+ if new_num_tokens <= old_num_tokens:
1316
+ new_bias = self.final_logits_bias[:, :new_num_tokens]
1317
+ else:
1318
+ extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
1319
+ new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
1320
+ self.register_buffer("final_logits_bias", new_bias)
1321
+
1322
+ def get_input_embeddings(self):
1323
+ return self.model.get_input_embeddings()
1324
+
1325
+ def set_input_embeddings(self, new_embeddings):
1326
+ self.model.set_input_embeddings(new_embeddings)
1327
+
1328
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.set_output_embeddings with SwitchTransformers->GPTSanJapanese
1329
+ def set_output_embeddings(self, new_embeddings):
1330
+ self.lm_head = new_embeddings
1331
+
1332
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.get_output_embeddings with SwitchTransformers->GPTSanJapanese
1333
+ def get_output_embeddings(self):
1334
+ return self.lm_head
1335
+
1336
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration._unpack_router_logits with SwitchTransformers->GPTSanJapanese
1337
+ def _unpack_router_logits(self, router_outputs):
1338
+ total_router_logits = []
1339
+ total_expert_indexes = []
1340
+ for router_output in router_outputs:
1341
+ if len(router_output[0].shape) > 1:
1342
+ router_logits, expert_indexes = router_output
1343
+ total_router_logits.append(router_logits)
1344
+ total_expert_indexes.append(expert_indexes)
1345
+ return torch.cat(total_router_logits, dim=1), torch.cat(total_expert_indexes, dim=1)
env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/tokenization_gptsan_japanese.py ADDED
@@ -0,0 +1,541 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for GPTSANJapanese."""
16
+ import collections
17
+ import json
18
+ import os
19
+ import re
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...tokenization_utils_base import (
26
+ BatchEncoding,
27
+ PreTokenizedInput,
28
+ PreTokenizedInputPair,
29
+ TextInput,
30
+ TextInputPair,
31
+ TruncationStrategy,
32
+ )
33
+ from ...utils import PaddingStrategy, logging
34
+
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
39
+
40
+ PRETRAINED_VOCAB_FILES_MAP = {
41
+ "vocab_file": {
42
+ "Tanrei/GPTSAN-japanese": "https://huggingface.co/Tanrei/GPTSAN-japanese/blob/main/vocab.txt",
43
+ },
44
+ "emoji_file": {
45
+ "Tanrei/GPTSAN-japanese": "https://huggingface.co/Tanrei/GPTSAN-japanese/blob/main/emoji.json",
46
+ },
47
+ }
48
+
49
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
50
+ "Tanrei/GPTSAN-japanese": 1280,
51
+ }
52
+
53
+
54
+ def load_vocab_and_emoji(vocab_file, emoji_file):
55
+ """Loads a vocabulary file and emoji file into a dictionary."""
56
+ with open(emoji_file, "r", encoding="utf-8") as f:
57
+ emoji = json.loads(f.read())
58
+
59
+ vocab = collections.OrderedDict()
60
+ raw_vocab = collections.OrderedDict()
61
+ ids_to_tokens = collections.OrderedDict()
62
+ with open(vocab_file, "r", encoding="utf-8") as f:
63
+ token = f.readlines()
64
+ token = [[t.rstrip("\n")] if (t == ",\n" or "," not in t) else t.rstrip("\n").split(",") for t in token]
65
+ for idx, b in enumerate(token):
66
+ ids_to_tokens[idx] = b
67
+ raw_vocab[",".join(b)] = idx
68
+ for wd in b:
69
+ vocab[wd] = idx
70
+
71
+ return vocab, raw_vocab, ids_to_tokens, emoji
72
+
73
+
74
+ class GPTSanJapaneseTokenizer(PreTrainedTokenizer):
75
+ """
76
+ This tokenizer is based on GPTNeoXJapaneseTokenizer and has the following modifications:
77
+ - Decoding byte0~byte255 tokens correctly
78
+ - Added bagofword token handling
79
+ - Return token_type_ids for Prefix-LM model
80
+ The bagofword token represents a repetition of the previous token and is converted to 3 consecutive tokens when
81
+ decoding. In addition, the original Japanese special Sub-Word-Encoding has been released in this repository
82
+ (https://github.com/tanreinama/Japanese-BPEEncoder_V2). The token_type_ids is a mask indicating the prefix input
83
+ position of the Prefix-LM model. To specify a prefix position, specify a prefix input for prefix_text, or specify a
84
+ sentence of the prefix part and the part after it as a text pair of batch input.
85
+
86
+ Example:
87
+
88
+ ```python
89
+ >>> from transformers import GPTSanJapaneseTokenizer
90
+
91
+ >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
92
+ >>> # You can confirm both 慶応 and 慶應 are encoded to 17750
93
+ >>> tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"]
94
+ [35993, 35998, 34347, 31459, 30647, 31448, 25, 30659, 35729, 35676, 32417, 30647, 17750, 35589, 17750, 35590, 321, 1281]
95
+
96
+ >>> # Both 慶応 and 慶應 are decoded to 慶応
97
+ >>> tokenizer.decode(tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"])
98
+ '吾輩は猫である🐯。実は慶応(慶応)大学出身'
99
+ ```
100
+
101
+ Example for Prefix-LM:
102
+
103
+ ```python
104
+ >>> from transformers import GPTSanJapaneseTokenizer
105
+
106
+ >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
107
+ >>> tokenizer("実は慶応(慶應)大学出身", prefix_text="吾輩は猫である🐯。")["input_ids"]
108
+ [35993, 34347, 31459, 30647, 31448, 25, 30659, 35729, 35676, 35998, 32417, 30647, 17750, 35589, 17750, 35590, 321, 1281]
109
+
110
+ >>> # Mask for Prefix-LM inputs
111
+ >>> tokenizer("実は慶応(慶應)大学出身", prefix_text="吾輩は猫である🐯。")["token_type_ids"]
112
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
113
+ ```
114
+
115
+ Example for batch encode:
116
+
117
+ ```python
118
+ >>> from transformers import GPTSanJapaneseTokenizer
119
+
120
+ >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
121
+ >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["input_ids"]
122
+ [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
123
+
124
+ >>> # Mask for Prefix-LM inputs
125
+ >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["token_type_ids"]
126
+ [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
127
+
128
+ >>> # Mask for padding
129
+ >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["attention_mask"]
130
+ [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
131
+ ```
132
+
133
+ Args:
134
+ vocab_file (`str`):
135
+ File containing the vocabulary.
136
+ emoji_file (`str`):
137
+ File containing the emoji.
138
+ unk_token (`str`, *optional*, defaults to `"<|nottoken|>"`):
139
+ The token used for an unknown character.
140
+ pad_token (`str`, *optional*, defaults to `"<|separator|>"`):
141
+ The token used for padding
142
+ bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`):
143
+ The beginning of sequence token.
144
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
145
+ The end of sequence token.
146
+ sep_token (`str`, *optional*, defaults to `"<|segmenter|>"`):
147
+ A special token to separate token to prefix part and general input part.
148
+ do_clean_text (`bool`, *optional*, defaults to `False`):
149
+ Whether or not to clean text for URL, EMAIL, TEL, Japanese DATE and Japanese PRICE.
150
+ """
151
+
152
+ vocab_files_names = VOCAB_FILES_NAMES
153
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
154
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
155
+ model_input_names = ["input_ids", "attention_mask", "token_type_ids"]
156
+
157
+ def __init__(
158
+ self,
159
+ vocab_file,
160
+ emoji_file,
161
+ unk_token="<|nottoken|>",
162
+ pad_token="<|separator|>",
163
+ bos_token="<|startoftext|>",
164
+ eos_token="<|endoftext|>",
165
+ sep_token="<|segmenter|>",
166
+ do_clean_text=False,
167
+ **kwargs,
168
+ ):
169
+ if not os.path.isfile(vocab_file):
170
+ raise ValueError(
171
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
172
+ " model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
173
+ )
174
+ if not os.path.isfile(emoji_file):
175
+ raise ValueError(
176
+ f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
177
+ " pretrained model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
178
+ )
179
+ self.do_clean_text = do_clean_text
180
+ self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
181
+ self.subword_tokenizer = SubWordJapaneseTokenizer(
182
+ vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
183
+ )
184
+
185
+ super().__init__(
186
+ unk_token=unk_token,
187
+ pad_token=pad_token,
188
+ bos_token=bos_token,
189
+ eos_token=eos_token,
190
+ sep_token=sep_token,
191
+ do_clean_text=do_clean_text,
192
+ **kwargs,
193
+ )
194
+
195
+ @property
196
+ # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer.vocab_size
197
+ def vocab_size(self):
198
+ # self.vocab maps several variant character forms (unique to Japanese) to the same id, so it is larger than the real vocabulary; vocab_size is therefore based on raw_vocab, which has one entry per id
199
+ return len(self.raw_vocab)
200
+
201
+ # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer.get_vocab
202
+ def get_vocab(self):
203
+ return dict(self.raw_vocab, **self.added_tokens_encoder)
204
+
205
+ # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer._tokenize
206
+ def _tokenize(self, text):
207
+ return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)
208
+
209
+ # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer._convert_token_to_id
210
+ def _convert_token_to_id(self, token):
211
+ """Converts a token (str) in an id using the vocab."""
212
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
213
+
214
+ # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer._convert_id_to_token
215
+ def _convert_id_to_token(self, index):
216
+ """Converts an index (integer) in a token (str) using the vocab."""
217
+ return self.subword_tokenizer.convert_id_to_token(index)
218
+
219
+ def convert_tokens_to_string(self, tokens):
220
+ """Converts a sequence of tokens (string) in a single string."""
221
+ words = []
222
+ byte_tokens = []
223
+ for word in tokens:
224
+ if word[:6] == "<|byte" and word[-2:] == "|>":
225
+ byte_tokens.append(int(word[6:-2]))
226
+ else:
227
+ if len(byte_tokens) > 0:
228
+ words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
229
+ byte_tokens = []
230
+ if word[:7] == "<|emoji" and word[-2:] == "|>":
231
+ words.append(self.emoji["emoji_inv"][word])
232
+ elif word == "<SP>":
233
+ words.append(" ")
234
+ elif word == "<BR>":
235
+ words.append("\n")
236
+ elif word == "<TAB>":
237
+ words.append("\t")
238
+ elif word == "<BLOCK>":
239
+ words.append("▀")
240
+ elif word == "<KIGOU>":
241
+ words.append("ǀ")
242
+ elif word == "<U2000U2BFF>":
243
+ words.append("‖")
244
+ elif word == "<|bagoftoken|>":
245
+ if len(words) > 0:
246
+ words.append(words[-1])
247
+ words.append(words[-1])
248
+ words.append(words[-1])
249
+ elif word.startswith("<|") and word.endswith("|>"):
250
+ words.append("")
251
+ else:
252
+ words.append(word)
253
+ if len(byte_tokens) > 0:
254
+ words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
255
+ text = "".join(words)
256
+ return text
257
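+ # Illustrative behaviour (the token values are assumptions, not from the original file):
+ # ["あ", "<|byte227|>", "<|byte129|>", "<|byte132|>", "<SP>"] is joined to "あい " because the
+ # three byte tokens are decoded together as the UTF-8 sequence for "い" and <SP> becomes a space.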
+
258
+ @property
259
+ def default_chat_template(self):
260
+ """
261
+ A simple chat template that adds standard BOS, SEP and EOS tokens between messages while discarding role
262
+ information.
263
+ """
264
+ logger.warning_once(
265
+ "\nNo chat template is defined for this tokenizer - using the default template "
266
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
267
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
268
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
269
+ )
270
+ return (
271
+ "{% for message in messages %}"
272
+ "{% if not loop.first %}{{ bos_token}}{% endif %}"
273
+ "{{ sep_token }}{{ message.content }} {{ eos_token }}"
274
+ "{% endfor %}"
275
+ )
276
+
277
+ # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer.save_vocabulary
278
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
279
+ index = 0
280
+ if os.path.isdir(save_directory):
281
+ vocab_file = os.path.join(
282
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
283
+ )
284
+ emoji_file = os.path.join(
285
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
286
+ )
287
+ else:
288
+ vocab_file = (
289
+ (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
290
+ )
291
+ emoji_file = (
292
+ (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
293
+ )
294
+ with open(vocab_file, "w", encoding="utf-8") as writer:
295
+ for token_index, token in self.ids_to_tokens.items():
296
+ if index != token_index:
297
+ logger.warning(
298
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
299
+ " Please check that the vocabulary is not corrupted!"
300
+ )
301
+ index = token_index
302
+ writer.write(",".join(token) + "\n")
303
+ index += 1
304
+ with open(emoji_file, "w", encoding="utf-8") as writer:
305
+ json.dump(self.emoji, writer)
306
+ return vocab_file, emoji_file
307
+
308
+ def create_token_type_ids_from_sequences(
309
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
310
+ ) -> List[int]:
311
+ # docstyle-ignore
312
+ """
313
+ The tokenizer returns token_type_ids as separators between the Prefix part and the rest.
314
+ token_type_ids is 1 for the Prefix part and 0 for the rest of the token.
315
+
316
+ Example:
317
+ ```python
318
+ >>> from transformers import GPTSanJapaneseTokenizer
319
+
320
+ >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
321
+ >>> x_token = tokenizer("アイウエ")
322
+ >>> # input_ids: | SOT | SEG | ア | イ | ウ | エ |
323
+ >>> # token_type_ids: | 1 | 0 | 0 | 0 | 0 | 0 |
324
+
325
+ >>> x_token = tokenizer("", prefix_text="アイウエ")
326
+ >>> # input_ids: | SOT | ア | イ | ウ | エ | SEG |
327
+ >>> # token_type_ids: | 1 | 1 | 1 | 1 | 1 | 0 |
328
+
329
+ >>> x_token = tokenizer("ウエ", prefix_text="アイ")
330
+ >>> # input_ids: | SOT | ア | イ | SEG | ウ | エ |
331
+ >>> # token_type_ids: | 1 | 1 | 1 | 0 | 0 | 0 |
332
+ ```"""
333
+ prefix_len = 0
334
+ if self.sep_token in self.vocab:
335
+ segid = self.vocab[self.sep_token]
336
+ if segid in token_ids_0:
337
+ prefix_len = token_ids_0.index(segid)
338
+ if token_ids_1 is None:
339
+ total_len = len(token_ids_0)
340
+ else:
341
+ total_len = len(token_ids_0 + token_ids_1)
342
+ return prefix_len * [1] + (total_len - prefix_len) * [0]
343
+
344
+ def prepare_for_tokenization(self, text, prefix_text=None, add_sep_token=None, **kwargs):
345
+ # GPTSAN inserts extra SEP tokens in Prefix-LM in addition to SOT for text generation.
346
+ # SOT at the beginning of the text, and SEP at the separator between the Prefix part and the rest.
347
+ if add_sep_token is None:
348
+ add_sep_token = self.sep_token not in text  # skip if the text already marks the un-prefix position explicitly
349
+ prepared = self.bos_token if self.bos_token in self.vocab else ""
350
+ prepared += prefix_text if prefix_text is not None else ""
351
+ if add_sep_token:
352
+ prepared += self.sep_token if self.sep_token in self.vocab else ""
353
+ prepared += text
354
+ return (prepared, kwargs)
355
+
356
+ def _batch_encode_plus(
357
+ self,
358
+ batch_text_or_text_pairs: Union[
359
+ List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair]
360
+ ],
361
+ add_special_tokens: bool = True,
362
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
363
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
364
+ max_length: Optional[int] = None,
365
+ stride: int = 0,
366
+ is_split_into_words: bool = False,
367
+ pad_to_multiple_of: Optional[int] = None,
368
+ return_tensors: Optional[str] = None,
369
+ return_token_type_ids: Optional[bool] = None,
370
+ return_attention_mask: Optional[bool] = None,
371
+ return_overflowing_tokens: bool = False,
372
+ return_special_tokens_mask: bool = False,
373
+ return_offsets_mapping: bool = False,
374
+ return_length: bool = False,
375
+ verbose: bool = True,
376
+ ) -> BatchEncoding:
377
+ # This tokenizer converts input text pairs into Prefix input and subsequent input
378
+ if isinstance(batch_text_or_text_pairs[0], (tuple, list)):
379
+ # Convert each (prefix, text) pair into a single text with an explicit un-prefix position
380
+ batch_prefix_texts = []
381
+ for pref, txt in batch_text_or_text_pairs:
382
+ batch_prefix_texts.append(pref + self.sep_token + txt)
383
+ batch_text_or_text_pairs = batch_prefix_texts
384
+
385
+ return super()._batch_encode_plus(
386
+ batch_text_or_text_pairs,
387
+ add_special_tokens,
388
+ padding_strategy,
389
+ truncation_strategy,
390
+ max_length,
391
+ stride,
392
+ is_split_into_words,
393
+ pad_to_multiple_of,
394
+ return_tensors,
395
+ return_token_type_ids,
396
+ return_attention_mask,
397
+ return_overflowing_tokens,
398
+ return_special_tokens_mask,
399
+ return_offsets_mapping,
400
+ return_length,
401
+ verbose,
402
+ )
403
+
404
+
405
+ class SubWordJapaneseTokenizer(object):
406
+ """
407
+ This tokenizer is based on GPTNeoXJapaneseTokenizer and has the following modifications:
408
+ - Decoding byte0~byte255 tokens correctly
409
+ - Added bagofword token handling
410
+
411
+ https://github.com/tanreinama/Japanese-BPEEncoder_V2. This tokenizer class is under the MIT License according to the
412
+ original repository.
413
+
414
+ MIT License
415
+
416
+ Copyright (c) 2020 tanreinama
417
+
418
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
419
+ documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
420
+ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
421
+ permit persons to whom the Software is furnished to do so, subject to the following conditions:
422
+
423
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of
424
+ the Software.
425
+
426
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
427
+ THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
428
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
429
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
430
+ SOFTWARE.
431
+ """
432
+
433
+ # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.__init__
434
+ def __init__(self, vocab, ids_to_tokens, emoji):
435
+ self.vocab = vocab # same as swe
436
+ self.ids_to_tokens = ids_to_tokens # same as bpe
437
+ self.emoji = emoji
438
+ self.maxlen = np.max([len(w) for w in self.vocab.keys()])
439
+ self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
440
+ self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
441
+ self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
442
+ self.content_repatter4 = re.compile(
443
+ r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
444
+ )
445
+ self.content_repatter5 = re.compile(
446
+ r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
447
+ )
448
+ self.content_repatter6 = re.compile(
449
+ r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
450
+ )
451
+ keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
452
+ blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
453
+ self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
454
+
455
+ # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.__len__
456
+ def __len__(self):
457
+ return len(self.ids_to_tokens)
458
+
459
+ # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.clean_text
460
+ def clean_text(self, content):
461
+ content = self.content_repatter1.sub("<URL>", content)
462
+ content = self.content_repatter2.sub("<EMAIL>", content)
463
+ content = self.content_repatter3.sub("<TEL>", content)
464
+ content = self.content_repatter4.sub("<DATE>", content)
465
+ content = self.content_repatter5.sub("<DATE>", content)
466
+ content = self.content_repatter6.sub("<PRICE>", content)
467
+ content = content.translate(self.content_trans1)
468
+ while "<BLOCK><BLOCK>" in content:
469
+ content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
470
+ return content
471
+
472
+ # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.tokenize
473
+ def tokenize(self, text, clean=False):
474
+ text = text.replace(" ", "<SP>")
475
+ text = text.replace(" ", "<SP>")
476
+ text = text.replace("\r\n", "<BR>")
477
+ text = text.replace("\n", "<BR>")
478
+ text = text.replace("\r", "<BR>")
479
+ text = text.replace("\t", "<TAB>")
480
+ text = text.replace("—", "ー")
481
+ text = text.replace("−", "ー")
482
+ for k, v in self.emoji["emoji"].items():
483
+ if k in text:
484
+ text = text.replace(k, v)
485
+ if clean:
486
+ text = self.clean_text(text)
487
+
488
+ def check_simbol(x):
489
+ e = x.encode()
490
+ if len(x) == 1 and len(e) == 2:
491
+ c = (int(e[0]) << 8) + int(e[1])
492
+ if (
493
+ (c >= 0xC2A1 and c <= 0xC2BF)
494
+ or (c >= 0xC780 and c <= 0xC783)
495
+ or (c >= 0xCAB9 and c <= 0xCBBF)
496
+ or (c >= 0xCC80 and c <= 0xCDA2)
497
+ ):
498
+ return True
499
+ return False
500
+
501
+ def checku2e(x):
502
+ e = x.encode()
503
+ if len(x) == 1 and len(e) == 3:
504
+ c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
505
+ if c >= 0xE28080 and c <= 0xE2B07F:
506
+ return True
507
+ return False
508
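+ # check_simbol flags single characters whose 2-byte UTF-8 encoding falls in the ranges above
+ # (later mapped to <KIGOU>); checku2e flags 3-byte characters roughly in U+2000-U+2BFF
+ # (later mapped to <U2000U2BFF>).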
+
509
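+ # Greedy scan: at each position try substrings up to maxlen characters (for "<...>" special
+ # tokens) or 3 characters otherwise, keep the candidate with the smallest vocab id, and fall
+ # back to <KIGOU>, <U2000U2BFF> or raw <|byteN|> tokens for characters not in the vocabulary.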
+ pos = 0
510
+ result = []
511
+ while pos < len(text):
512
+ end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
513
+ candidates = [] # (token_id, token, pos)
514
+ for e in range(end, pos, -1):
515
+ wd = text[pos:e]
516
+ if wd in self.vocab:
517
+ if wd[0] == "<" and len(wd) > 2:
518
+ candidates = [(self.vocab[wd], wd, e)]
519
+ break
520
+ else:
521
+ candidates.append((self.vocab[wd], wd, e))
522
+ if len(candidates) > 0:
523
+ # the smallest token_id is adopted
524
+ _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
525
+ result.append(wd)
526
+ pos = e
527
+ else:
528
+ end = pos + 1
529
+ wd = text[pos:end]
530
+ if check_simbol(wd):
531
+ result.append("<KIGOU>")
532
+ elif checku2e(wd):
533
+ result.append("<U2000U2BFF>")
534
+ else:
535
+ for i in wd.encode("utf-8"):
536
+ result.append("<|byte%d|>" % i)
537
+ pos = end
538
+ return result
539
+
540
+ def convert_id_to_token(self, index):
541
+ return self.ids_to_tokens[index][0]
env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.39 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/configuration_groupvit.cpython-310.pyc ADDED
Binary file (16 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/convert_groupvit_nvlab_to_hf.cpython-310.pyc ADDED
Binary file (5.85 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/modeling_groupvit.cpython-310.pyc ADDED
Binary file (47.7 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/modeling_tf_groupvit.cpython-310.pyc ADDED
Binary file (63.5 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/modeling_groupvit.py ADDED
@@ -0,0 +1,1586 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 NVIDIA and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch GroupViT model."""
16
+
17
+
18
+ import collections.abc
19
+ import math
20
+ from dataclasses import dataclass
21
+ from typing import Any, Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import torch
25
+ import torch.utils.checkpoint
26
+ from torch import nn
27
+
28
+ from ...activations import ACT2FN
29
+ from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask
30
+ from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
31
+ from ...modeling_utils import PreTrainedModel
32
+ from ...utils import (
33
+ ModelOutput,
34
+ add_start_docstrings,
35
+ add_start_docstrings_to_model_forward,
36
+ logging,
37
+ replace_return_docstrings,
38
+ )
39
+ from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
40
+
41
+
42
+ logger = logging.get_logger(__name__)
43
+
44
+ _CHECKPOINT_FOR_DOC = "nvidia/groupvit-gcc-yfcc"
45
+
46
+ GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = [
47
+ "nvidia/groupvit-gcc-yfcc",
48
+ # See all GroupViT models at https://huggingface.co/models?filter=groupvit
49
+ ]
50
+
51
+
52
+ # contrastive loss function, adapted from
53
+ # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
54
+ def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
55
+ return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
56
+
57
+
58
+ # Copied from transformers.models.clip.modeling_clip.clip_loss with clip->groupvit
59
+ def groupvit_loss(similarity: torch.Tensor) -> torch.Tensor:
60
+ caption_loss = contrastive_loss(similarity)
61
+ image_loss = contrastive_loss(similarity.t())
62
+ return (caption_loss + image_loss) / 2.0
63
+
64
+
65
+ def hard_softmax(logits: torch.Tensor, dim: int):
66
+ y_soft = logits.softmax(dim)
67
+ # Straight through.
68
+ index = y_soft.max(dim, keepdim=True)[1]
69
+ y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0)
70
+ ret = y_hard - y_soft.detach() + y_soft
71
+
72
+ return ret
73
+
74
+
75
+ def gumbel_softmax(logits: torch.Tensor, tau: float = 1, hard: bool = False, dim: int = -1) -> torch.Tensor:
76
+ # more stable https://github.com/pytorch/pytorch/issues/41663
77
+ gumbel_dist = torch.distributions.gumbel.Gumbel(
78
+ torch.tensor(0.0, device=logits.device, dtype=logits.dtype),
79
+ torch.tensor(1.0, device=logits.device, dtype=logits.dtype),
80
+ )
81
+ gumbels = gumbel_dist.sample(logits.shape)
82
+
83
+ gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau)
84
+ y_soft = gumbels.softmax(dim)
85
+
86
+ if hard:
87
+ # Straight through.
88
+ index = y_soft.max(dim, keepdim=True)[1]
89
+ y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0)
90
+ ret = y_hard - y_soft.detach() + y_soft
91
+ else:
92
+ # Reparametrization trick.
93
+ ret = y_soft
94
+ return ret
95
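A small standalone sketch (an editorial illustration with toy shapes) of the straight-through behaviour used by `hard_softmax` and `gumbel_softmax(..., hard=True)`: the forward value is one-hot, yet gradients flow through the soft distribution. The helper name below is mine, not part of the module:

```python
import torch


def straight_through_softmax(logits: torch.Tensor, dim: int = -1) -> torch.Tensor:
    # Same construction as hard_softmax above: one-hot forward, soft backward.
    y_soft = logits.softmax(dim)
    index = y_soft.max(dim, keepdim=True)[1]
    y_hard = torch.zeros_like(logits).scatter_(dim, index, 1.0)
    return y_hard - y_soft.detach() + y_soft


logits = torch.randn(2, 4, requires_grad=True)
y = straight_through_softmax(logits)
print(y)  # one-hot rows in the forward pass

(y * torch.arange(4.0)).sum().backward()
print(logits.grad)  # non-zero: the backward pass uses the soft probabilities
```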
+
96
+
97
+ def resize_attention_map(attentions, height, width, align_corners=False):
98
+ """
99
+ Args:
100
+ attentions (`torch.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width]
101
+ height (`int`): height of the output attention map
102
+ width (`int`): width of the output attention map
103
+ align_corners (`bool`, *optional*): the `align_corner` argument for `nn.functional.interpolate`.
104
+
105
+ Returns:
106
+ `torch.Tensor`: resized attention map of shape [batch_size, groups, height, width]
107
+ """
108
+
109
+ scale = (height * width // attentions.shape[2]) ** 0.5
110
+ if height > width:
111
+ feat_width = int(np.round(width / scale))
112
+ feat_height = attentions.shape[2] // feat_width
113
+ else:
114
+ feat_height = int(np.round(height / scale))
115
+ feat_width = attentions.shape[2] // feat_height
116
+
117
+ batch_size = attentions.shape[0]
118
+ groups = attentions.shape[1] # number of group tokens
119
+ # [batch_size, groups, feat_height*feat_width] -> [batch_size, groups, feat_height, feat_width]
120
+ attentions = attentions.reshape(batch_size, groups, feat_height, feat_width)
121
+ attentions = nn.functional.interpolate(
122
+ attentions, size=(height, width), mode="bilinear", align_corners=align_corners
123
+ )
124
+ return attentions
125
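+
+ # Minimal usage sketch (hypothetical helper, numbers are placeholders): a flattened attention map
+ # over 14x14 patch features is upsampled back to a 224x224 image resolution.
+ def _example_resize_attention_map():
+     attentions = torch.rand(1, 8, 14 * 14)  # [batch_size, groups, feat_height * feat_width]
+     resized = resize_attention_map(attentions, height=224, width=224)
+     return resized.shape  # torch.Size([1, 8, 224, 224])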
+
126
+
127
+ def get_grouping_from_attentions(attentions, hw_shape):
128
+ """
129
+ Args:
130
+ attentions (`tuple(torch.FloatTensor)`): tuple of attention maps returned by `GroupViTVisionTransformer`
131
+ hw_shape (`tuple(int)`): height and width of the output attention map
132
+ Returns:
133
+ `torch.Tensor`: the attention map of shape [batch_size, groups, height, width]
134
+ """
135
+
136
+ attn_maps = []
137
+ with torch.no_grad():
138
+ prev_attn_masks = None
139
+ for attn_masks in attentions:
140
+ # [batch_size, num_groups, height x width] -> [batch_size, height x width, num_groups]
141
+ attn_masks = attn_masks.permute(0, 2, 1).contiguous()
142
+ if prev_attn_masks is None:
143
+ prev_attn_masks = attn_masks
144
+ else:
145
+ prev_attn_masks = prev_attn_masks @ attn_masks
146
+ # [batch_size, height x width, num_groups] -> [batch_size, num_groups, height x width] -> [batch_size, num_groups, height, width]
147
+ cur_attn_map = resize_attention_map(prev_attn_masks.permute(0, 2, 1).contiguous(), *hw_shape)
148
+ attn_maps.append(cur_attn_map)
149
+
150
+ # [batch_size, num_groups, height, width]
151
+ final_grouping = attn_maps[-1]
152
+
153
+ return final_grouping
154
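+
+ # Minimal usage sketch (hypothetical helper, shapes are placeholders): two stage-wise grouping
+ # maps, e.g. 196 patches -> 64 groups -> 8 groups, are chained into one pixel-to-final-group map.
+ def _example_get_grouping_from_attentions():
+     stage1 = torch.rand(1, 64, 14 * 14)  # [batch_size, num_groups_stage1, feat_height * feat_width]
+     stage2 = torch.rand(1, 8, 64)  # [batch_size, num_groups_stage2, num_groups_stage1]
+     grouping = get_grouping_from_attentions((stage1, stage2), hw_shape=(224, 224))
+     return grouping.shape  # torch.Size([1, 8, 224, 224])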
+
155
+
156
+ class GroupViTCrossAttentionLayer(nn.Module):
157
+ def __init__(self, config: GroupViTVisionConfig):
158
+ super().__init__()
159
+ self.attn = GroupViTAttention(config)
160
+ self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
161
+ self.mlp = GroupViTMLP(config)
162
+ self.norm_post = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
163
+
164
+ def forward(self, query, key):
165
+ x = query
166
+ x = x + self.attn(query, encoder_hidden_states=key)[0]
167
+ x = x + self.mlp(self.norm2(x))
168
+ x = self.norm_post(x)
169
+ return x
170
+
171
+
172
+ class GroupViTAssignAttention(nn.Module):
173
+ def __init__(self, config: GroupViTVisionConfig):
174
+ super().__init__()
175
+ self.scale = config.hidden_size**-0.5
176
+
177
+ self.q_proj = nn.Linear(config.hidden_size, config.hidden_size)
178
+ self.k_proj = nn.Linear(config.hidden_size, config.hidden_size)
179
+ self.v_proj = nn.Linear(config.hidden_size, config.hidden_size)
180
+ self.proj = nn.Linear(config.hidden_size, config.hidden_size)
181
+ self.assign_eps = config.assign_eps
182
+
183
+ def get_attn(self, attn, gumbel=True, hard=True):
184
+ if gumbel and self.training:
185
+ attn = gumbel_softmax(attn, dim=-2, hard=hard)
186
+ else:
187
+ if hard:
188
+ attn = hard_softmax(attn, dim=-2)
189
+ else:
190
+ attn = nn.functional.softmax(attn, dim=-2)
191
+
192
+ return attn
193
+
194
+ def forward(self, query, key):
195
+ value = key
196
+ # [batch_size, query_length, channels]
197
+ query = self.q_proj(query)
198
+
199
+ # [batch_size, key_length, channels]
200
+ key = self.k_proj(key)
201
+
202
+ # [batch_size, key_length, channels]
203
+ value = self.v_proj(value)
204
+
205
+ # [batch_size, query_length, key_length]
206
+ raw_attn = (query @ key.transpose(-2, -1)) * self.scale
207
+
208
+ attn = self.get_attn(raw_attn)
209
+ soft_attn = self.get_attn(raw_attn, gumbel=False, hard=False)
210
+
211
+ attn = attn / (attn.sum(dim=-1, keepdim=True) + self.assign_eps)
212
+
213
+ out = attn @ value
214
+
215
+ out = self.proj(out)
216
+
217
+ return out, soft_attn
218
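+
+ # Minimal usage sketch (hypothetical helper; `config` is assumed to be a `GroupViTVisionConfig`):
+ # the assignment attention normalizes over the group (query) axis, so every image token is softly
+ # assigned to a group; `soft_attn` is the differentiable map reused later for segmentation.
+ def _example_assign_attention(config):
+     module = GroupViTAssignAttention(config).eval()
+     group_tokens = torch.randn(1, 8, config.hidden_size)  # projected group tokens (queries)
+     image_tokens = torch.randn(1, 196, config.hidden_size)  # patch features (keys/values)
+     out, soft_attn = module(group_tokens, image_tokens)
+     return out.shape, soft_attn.shape  # ([1, 8, hidden_size], [1, 8, 196])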
+
219
+
220
+ class GroupViTTokenAssign(nn.Module):
221
+ def __init__(self, config: GroupViTVisionConfig, num_group_token, num_output_group):
222
+ super().__init__()
223
+ self.num_output_group = num_output_group
224
+ # norm on group_tokens
225
+ self.norm_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
226
+ assign_mlp_ratio = (
227
+ config.assign_mlp_ratio
228
+ if isinstance(config.assign_mlp_ratio, collections.abc.Iterable)
229
+ else (config.assign_mlp_ratio, config.assign_mlp_ratio)
230
+ )
231
+ tokens_dim, channels_dim = [int(x * config.hidden_size) for x in assign_mlp_ratio]
232
+ self.mlp_inter = GroupViTMixerMLP(config, num_group_token, tokens_dim, num_output_group)
233
+ self.norm_post_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
234
+ # norm on x
235
+ self.norm_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
236
+ self.pre_assign_attn = GroupViTCrossAttentionLayer(config)
237
+
238
+ self.assign = GroupViTAssignAttention(config)
239
+ self.norm_new_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
240
+ self.mlp_channels = GroupViTMLP(config, config.hidden_size, channels_dim, config.hidden_size)
241
+
242
+ def project_group_token(self, group_tokens):
243
+ """
244
+ Args:
245
+ group_tokens (torch.Tensor): group tokens, [batch_size, num_group_tokens, channels]
246
+
247
+ Returns:
248
+ projected_group_tokens (torch.Tensor): [batch_size, num_output_groups, channels]
249
+ """
250
+ # [B, num_output_groups, C] <- [B, num_group_tokens, C]
251
+ projected_group_tokens = self.mlp_inter(group_tokens)
252
+ projected_group_tokens = self.norm_post_tokens(projected_group_tokens)
253
+ return projected_group_tokens
254
+
255
+ def forward(self, image_tokens, group_tokens):
256
+ """
257
+ Args:
258
+ image_tokens (`torch.Tensor`): image tokens, of shape [batch_size, input_length, channels]
259
+ group_tokens (`torch.Tensor`): group tokens, [batch_size, num_group_tokens, channels]
260
+ """
261
+
262
+ group_tokens = self.norm_tokens(group_tokens)
263
+ image_tokens = self.norm_x(image_tokens)
264
+ # [batch_size, num_output_groups, channels]
265
+ projected_group_tokens = self.project_group_token(group_tokens)
266
+ projected_group_tokens = self.pre_assign_attn(projected_group_tokens, image_tokens)
267
+ new_image_tokens, attention = self.assign(projected_group_tokens, image_tokens)
268
+ new_image_tokens += projected_group_tokens
269
+
270
+ new_image_tokens = new_image_tokens + self.mlp_channels(self.norm_new_x(new_image_tokens))
271
+
272
+ return new_image_tokens, attention
273
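+
+ # Minimal usage sketch (hypothetical helper; `config` is assumed to be a `GroupViTVisionConfig`):
+ # one grouping block turns 64 learned group tokens plus the image tokens into 8 new segment
+ # tokens; the tensors are random placeholders.
+ def _example_token_assign(config):
+     block = GroupViTTokenAssign(config, num_group_token=64, num_output_group=8).eval()
+     image_tokens = torch.randn(1, 196, config.hidden_size)
+     group_tokens = torch.randn(1, 64, config.hidden_size)
+     new_image_tokens, attention = block(image_tokens, group_tokens)
+     return new_image_tokens.shape, attention.shape  # ([1, 8, hidden_size], [1, 8, 196])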
+
274
+
275
+ @dataclass
276
+ class GroupViTModelOutput(ModelOutput):
277
+ """
278
+ Args:
279
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
280
+ Contrastive loss for image-text similarity.
281
+ logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
282
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
283
+ similarity scores.
284
+ logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
285
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
286
+ similarity scores.
287
+ segmentation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`):
288
+ Classification scores for each pixel.
289
+
290
+ <Tip warning={true}>
291
+
292
+ The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is
293
+ to avoid doing two interpolations and losing some quality when a user needs to resize the logits to the
294
+ original image size as post-processing. You should always check your logits shape and resize as needed.
295
+
296
+ </Tip>
297
+
298
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
299
+ The text embeddings obtained by applying the projection layer to the pooled output of
300
+ [`GroupViTTextModel`].
301
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
302
+ The image embeddings obtained by applying the projection layer to the pooled output of
303
+ [`GroupViTVisionModel`].
304
+ text_model_output (`BaseModelOutputWithPooling`):
305
+ The output of the [`GroupViTTextModel`].
306
+ vision_model_output (`BaseModelOutputWithPooling`):
307
+ The output of the [`GroupViTVisionModel`].
308
+ """
309
+
310
+ loss: Optional[torch.FloatTensor] = None
311
+ logits_per_image: torch.FloatTensor = None
312
+ logits_per_text: torch.FloatTensor = None
313
+ segmentation_logits: torch.FloatTensor = None
314
+ text_embeds: torch.FloatTensor = None
315
+ image_embeds: torch.FloatTensor = None
316
+ text_model_output: BaseModelOutputWithPooling = None
317
+ vision_model_output: BaseModelOutputWithPooling = None
318
+
319
+ def to_tuple(self) -> Tuple[Any]:
320
+ return tuple(
321
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
322
+ for k in self.keys()
323
+ )
324
+
325
+
326
+ class GroupViTPatchEmbeddings(nn.Module):
327
+ """
328
+ Image to Patch Embedding.
329
+ """
330
+
331
+ def __init__(
332
+ self,
333
+ image_size: int = 224,
334
+ patch_size: Union[int, Tuple[int, int]] = 16,
335
+ num_channels: int = 3,
336
+ embed_dim: int = 768,
337
+ ):
338
+ super().__init__()
339
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
340
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
341
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
342
+ self.image_size = image_size
343
+ self.patch_size = patch_size
344
+ self.num_patches = num_patches
345
+
346
+ self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
347
+
348
+ def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
349
+ batch_size, num_channels, height, width = pixel_values.shape
350
+ if not interpolate_pos_encoding:
351
+ if height != self.image_size[0] or width != self.image_size[1]:
352
+ raise ValueError(
353
+ f"Input image size ({height}*{width}) doesn't match model"
354
+ f" ({self.image_size[0]}*{self.image_size[1]})."
355
+ )
356
+ x = self.projection(pixel_values).flatten(2).transpose(1, 2)
357
+ return x
358
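+
+ # Minimal usage sketch (hypothetical helper, numbers are placeholders): the patch embedding is a
+ # strided convolution, so a 224x224 image with 16x16 patches yields 14 * 14 = 196 patch tokens.
+ def _example_patch_embeddings():
+     embeddings = GroupViTPatchEmbeddings(image_size=224, patch_size=16, num_channels=3, embed_dim=384)
+     pixel_values = torch.randn(1, 3, 224, 224)
+     return embeddings(pixel_values).shape  # torch.Size([1, 196, 384])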
+
359
+
360
+ class GroupViTVisionEmbeddings(nn.Module):
361
+ def __init__(self, config: GroupViTVisionConfig):
362
+ super().__init__()
363
+
364
+ self.patch_embeddings = GroupViTPatchEmbeddings(
365
+ image_size=config.image_size,
366
+ patch_size=config.patch_size,
367
+ num_channels=config.num_channels,
368
+ embed_dim=config.hidden_size,
369
+ )
370
+ num_patches = self.patch_embeddings.num_patches
371
+ self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches, config.hidden_size))
372
+ self.dropout = nn.Dropout(config.dropout)
373
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
374
+ self.config = config
375
+
376
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
377
+ """
378
+ This method interpolates the pre-trained position encodings, so that the model can be used on higher
379
+ resolution images.
380
+
381
+ Source:
382
+ https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
383
+ """
384
+
385
+ npatch = embeddings.shape[1]
386
+ if npatch == self.position_embeddings.shape[1] and height == width:
387
+ return self.position_embeddings
388
+ patch_pos_embed = self.position_embeddings
389
+ num_original_pos_embed = patch_pos_embed.shape[1]
390
+ dim = embeddings.shape[-1]
391
+ feat_height = height // self.config.patch_size
392
+ feat_width = width // self.config.patch_size
393
+ # we add a small number to avoid floating point error in the interpolation
394
+ # see discussion at https://github.com/facebookresearch/dino/issues/8
395
+ feat_height, feat_width = feat_height + 0.1, feat_width + 0.1
396
+ original_height = original_width = math.sqrt(num_original_pos_embed)
397
+ reshaped_patch_pos_embed = patch_pos_embed.reshape(1, int(original_height), int(original_width), dim).permute(
398
+ 0, 3, 1, 2
399
+ )
400
+ scale_factor = (feat_height / original_height, feat_width / original_width)
401
+ patch_pos_embed = nn.functional.interpolate(
402
+ reshaped_patch_pos_embed,
403
+ scale_factor=scale_factor,
404
+ mode="bicubic",
405
+ align_corners=False,
406
+ )
407
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
408
+ return patch_pos_embed
409
+
410
+ def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
411
+ batch_size, num_channels, height, width = pixel_values.shape
412
+ embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
413
+
414
+ embeddings = self.layernorm(embeddings)
415
+
416
+ batch_size, seq_len, _ = embeddings.size()
417
+
418
+ # add positional encoding to each token
419
+ if interpolate_pos_encoding:
420
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
421
+ else:
422
+ embeddings = embeddings + self.position_embeddings
423
+
424
+ embeddings = self.dropout(embeddings)
425
+
426
+ return embeddings
427
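+
+ # Minimal usage sketch (hypothetical helper; `config` is assumed to be a `GroupViTVisionConfig`):
+ # with `interpolate_pos_encoding=True` the pre-trained position embeddings are resized bicubically,
+ # so inputs larger than `config.image_size` are accepted as well.
+ def _example_vision_embeddings(config):
+     embeddings = GroupViTVisionEmbeddings(config)
+     large_image = torch.randn(1, config.num_channels, 2 * config.image_size, 2 * config.image_size)
+     tokens = embeddings(large_image, interpolate_pos_encoding=True)
+     return tokens.shape  # [1, (2 * image_size // patch_size) ** 2, hidden_size]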
+
428
+
429
+ # Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->GroupViT
430
+ class GroupViTTextEmbeddings(nn.Module):
431
+ def __init__(self, config: GroupViTTextConfig):
432
+ super().__init__()
433
+ embed_dim = config.hidden_size
434
+
435
+ self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
436
+ self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)
437
+
438
+ # position_ids (1, len position emb) is contiguous in memory and registered as a non-persistent buffer
439
+ self.register_buffer(
440
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
441
+ )
442
+
443
+ def forward(
444
+ self,
445
+ input_ids: Optional[torch.LongTensor] = None,
446
+ position_ids: Optional[torch.LongTensor] = None,
447
+ inputs_embeds: Optional[torch.FloatTensor] = None,
448
+ ) -> torch.Tensor:
449
+ seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
450
+
451
+ if position_ids is None:
452
+ position_ids = self.position_ids[:, :seq_length]
453
+
454
+ if inputs_embeds is None:
455
+ inputs_embeds = self.token_embedding(input_ids)
456
+
457
+ position_embeddings = self.position_embedding(position_ids)
458
+ embeddings = inputs_embeds + position_embeddings
459
+
460
+ return embeddings
461
+
462
+
463
+ class GroupViTStage(nn.Module):
464
+ """This corresponds to the `GroupingLayer` class in the GroupViT implementation."""
465
+
466
+ def __init__(
467
+ self,
468
+ config: GroupViTVisionConfig,
469
+ depth: int,
470
+ num_prev_group_token: int,
471
+ num_group_token: int,
472
+ num_output_group: int,
473
+ ):
474
+ super().__init__()
475
+ self.depth = depth
476
+ self.num_group_token = num_group_token
477
+ if num_group_token > 0:
478
+ self.group_token = nn.Parameter(torch.zeros(1, num_group_token, config.hidden_size))
479
+ else:
480
+ self.group_token = None
481
+ self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(depth)])
482
+
483
+ if num_group_token > 0:
484
+ self.downsample = GroupViTTokenAssign(
485
+ config=config,
486
+ num_group_token=num_group_token,
487
+ num_output_group=num_output_group,
488
+ )
489
+ else:
490
+ self.downsample = None
491
+
492
+ if num_prev_group_token > 0 and num_group_token > 0:
493
+ self.group_projector = nn.Sequential(
494
+ nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps),
495
+ GroupViTMixerMLP(config, num_prev_group_token, config.hidden_size // 2, num_group_token),
496
+ )
497
+ else:
498
+ self.group_projector = None
499
+
500
+ @property
501
+ def with_group_token(self):
502
+ return self.group_token is not None
503
+
504
+ def split_x(self, x):
505
+ if self.with_group_token:
506
+ return x[:, : -self.num_group_token], x[:, -self.num_group_token :]
507
+ else:
508
+ return x, None
509
+
510
+ def concat_x(self, x: torch.Tensor, group_token: Optional[torch.Tensor] = None) -> torch.Tensor:
511
+ if group_token is None:
512
+ return x
513
+ return torch.cat([x, group_token], dim=1)
514
+
515
+ def forward(
516
+ self,
517
+ hidden_states: torch.Tensor,
518
+ prev_group_token: Optional[torch.Tensor] = None,
519
+ output_attentions: Optional[bool] = False,
520
+ ) -> Tuple[torch.FloatTensor]:
521
+ """
522
+ Args:
523
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
524
+ prev_group_token (`torch.FloatTensor`, *optional*): group tokens from the previous stage, of shape
525
+ `(batch, num_prev_group_token, embed_dim)`, projected and added to this stage's group tokens when a
526
+ group projector is present.
527
+ output_attentions (`bool`, *optional*):
528
+ Whether or not to return the grouping tensors of the Grouping block.
529
+ """
530
+ if self.with_group_token:
531
+ group_token = self.group_token.expand(hidden_states.size(0), -1, -1)
532
+ if self.group_projector is not None:
533
+ group_token = group_token + self.group_projector(prev_group_token)
534
+ else:
535
+ group_token = None
536
+
537
+ x = hidden_states
538
+
539
+ cat_x = self.concat_x(x, group_token)
540
+ for layer in self.layers:
541
+ layer_out = layer(cat_x, attention_mask=None, causal_attention_mask=None)
542
+ cat_x = layer_out[0]
543
+
544
+ x, group_token = self.split_x(cat_x)
545
+
546
+ attention = None
547
+ if self.downsample is not None:
548
+ x, attention = self.downsample(x, group_token)
549
+
550
+ outputs = (x, group_token)
551
+ if output_attentions:
552
+ outputs = outputs + (attention,)
553
+
554
+ return outputs
555
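+
+ # Minimal usage sketch (hypothetical helper; `config` is assumed to be a `GroupViTVisionConfig`):
+ # a stage appends its learned group tokens to the image tokens, runs `depth` transformer layers
+ # over the concatenation, then downsamples the image tokens to `num_output_group` tokens.
+ def _example_stage(config):
+     stage = GroupViTStage(config, depth=2, num_prev_group_token=0, num_group_token=64, num_output_group=8)
+     hidden_states = torch.randn(1, 196, config.hidden_size)
+     new_hidden_states, group_tokens = stage(hidden_states)
+     return new_hidden_states.shape  # torch.Size([1, 8, hidden_size])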
+
556
+
557
+ class GroupViTMLP(nn.Module):
558
+ def __init__(
559
+ self,
560
+ config: GroupViTVisionConfig,
561
+ hidden_size: Optional[int] = None,
562
+ intermediate_size: Optional[int] = None,
563
+ output_size: Optional[int] = None,
564
+ ):
565
+ super().__init__()
566
+ self.config = config
567
+ self.activation_fn = ACT2FN[config.hidden_act]
568
+ hidden_size = hidden_size if hidden_size is not None else config.hidden_size
569
+ intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size
570
+ output_size = output_size if output_size is not None else hidden_size
571
+ self.fc1 = nn.Linear(hidden_size, intermediate_size)
572
+ self.fc2 = nn.Linear(intermediate_size, output_size)
573
+
574
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
575
+ hidden_states = self.fc1(hidden_states)
576
+ hidden_states = self.activation_fn(hidden_states)
577
+ hidden_states = self.fc2(hidden_states)
578
+ return hidden_states
579
+
580
+
581
+ class GroupViTMixerMLP(GroupViTMLP):
582
+ def forward(self, x):
583
+ x = super().forward(x.transpose(1, 2))
584
+ return x.transpose(1, 2)
585
+
586
+
587
+ class GroupViTAttention(nn.Module):
588
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
589
+
590
+ def __init__(self, config):
591
+ super().__init__()
592
+ self.config = config
593
+ self.embed_dim = config.hidden_size
594
+ self.num_heads = config.num_attention_heads
595
+ self.head_dim = self.embed_dim // self.num_heads
596
+ if self.head_dim * self.num_heads != self.embed_dim:
597
+ raise ValueError(
598
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
599
+ f" {self.num_heads})."
600
+ )
601
+ self.scale = self.head_dim**-0.5
602
+ self.dropout = config.attention_dropout
603
+
604
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
605
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
606
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
607
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
608
+
609
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
610
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
611
+
612
+ def forward(
613
+ self,
614
+ hidden_states: torch.Tensor,
615
+ attention_mask: Optional[torch.Tensor] = None,
616
+ causal_attention_mask: Optional[torch.Tensor] = None,
617
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
618
+ output_attentions: Optional[bool] = False,
619
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
620
+ """Input shape: Batch x Time x Channel"""
621
+
622
+ bsz, tgt_len, embed_dim = hidden_states.size()
623
+ is_cross_attention = encoder_hidden_states is not None
624
+
625
+ # get query proj
626
+ query_states = self.q_proj(hidden_states) * self.scale
627
+ if is_cross_attention:
628
+ key_states = self._shape(self.k_proj(encoder_hidden_states), -1, bsz)
629
+ value_states = self._shape(self.v_proj(encoder_hidden_states), -1, bsz)
630
+ else:
631
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
632
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
633
+
634
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
635
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
636
+ key_states = key_states.view(*proj_shape)
637
+ value_states = value_states.view(*proj_shape)
638
+
639
+ src_len = key_states.size(1)
640
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
641
+
642
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
643
+ raise ValueError(
644
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
645
+ f" {attn_weights.size()}"
646
+ )
647
+
648
+ # apply the causal_attention_mask first
649
+ if causal_attention_mask is not None:
650
+ if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
651
+ raise ValueError(
652
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
653
+ f" {causal_attention_mask.size()}"
654
+ )
655
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
656
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
657
+
658
+ if attention_mask is not None:
659
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
660
+ raise ValueError(
661
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
662
+ )
663
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
664
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
665
+
666
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
667
+
668
+ if output_attentions:
669
+ # this operation is a bit awkward, but it's required to
670
+ # make sure that attn_weights keeps its gradient.
671
+ # In order to do so, attn_weights have to be reshaped
672
+ # twice and have to be reused in the following
673
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
674
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
675
+ else:
676
+ attn_weights_reshaped = None
677
+
678
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
679
+
680
+ attn_output = torch.bmm(attn_probs, value_states)
681
+
682
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
683
+ raise ValueError(
684
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
685
+ f" {attn_output.size()}"
686
+ )
687
+
688
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
689
+ attn_output = attn_output.transpose(1, 2)
690
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
691
+
692
+ attn_output = self.out_proj(attn_output)
693
+
694
+ return attn_output, attn_weights_reshaped
695
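+
+ # Minimal usage sketch (hypothetical helper; `config` is assumed to be a `GroupViTVisionConfig`):
+ # passing `encoder_hidden_states` switches the module to cross-attention (queries come from
+ # `hidden_states`, keys/values from the encoder states), which is how `GroupViTCrossAttentionLayer`
+ # above uses it; shapes are placeholders.
+ def _example_cross_attention(config):
+     attention = GroupViTAttention(config)
+     queries = torch.randn(1, 8, config.hidden_size)  # e.g. projected group tokens
+     keys = torch.randn(1, 196, config.hidden_size)  # e.g. image tokens
+     attn_output, attn_weights = attention(queries, encoder_hidden_states=keys, output_attentions=True)
+     return attn_output.shape, attn_weights.shape  # ([1, 8, hidden_size], [1, num_heads, 8, 196])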
+
696
+
697
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->GroupViT
698
+ class GroupViTEncoderLayer(nn.Module):
699
+ def __init__(self, config: GroupViTConfig):
700
+ super().__init__()
701
+ self.embed_dim = config.hidden_size
702
+ self.self_attn = GroupViTAttention(config)
703
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
704
+ self.mlp = GroupViTMLP(config)
705
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
706
+
707
+ def forward(
708
+ self,
709
+ hidden_states: torch.Tensor,
710
+ attention_mask: torch.Tensor,
711
+ causal_attention_mask: torch.Tensor,
712
+ output_attentions: Optional[bool] = False,
713
+ ) -> Tuple[torch.FloatTensor]:
714
+ """
715
+ Args:
716
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
717
+ attention_mask (`torch.FloatTensor`): attention mask of size
718
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
719
+ `(config.encoder_attention_heads,)`.
720
+ output_attentions (`bool`, *optional*):
721
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
722
+ returned tensors for more detail.
723
+ """
724
+ residual = hidden_states
725
+
726
+ hidden_states = self.layer_norm1(hidden_states)
727
+ hidden_states, attn_weights = self.self_attn(
728
+ hidden_states=hidden_states,
729
+ attention_mask=attention_mask,
730
+ causal_attention_mask=causal_attention_mask,
731
+ output_attentions=output_attentions,
732
+ )
733
+ hidden_states = residual + hidden_states
734
+
735
+ residual = hidden_states
736
+ hidden_states = self.layer_norm2(hidden_states)
737
+ hidden_states = self.mlp(hidden_states)
738
+ hidden_states = residual + hidden_states
739
+
740
+ outputs = (hidden_states,)
741
+
742
+ if output_attentions:
743
+ outputs += (attn_weights,)
744
+
745
+ return outputs
746
+
747
+
748
+ class GroupViTPreTrainedModel(PreTrainedModel):
749
+ """
750
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
751
+ models.
752
+ """
753
+
754
+ config_class = GroupViTConfig
755
+ base_model_prefix = "groupvit"
756
+ supports_gradient_checkpointing = True
757
+
758
+ def _init_weights(self, module):
759
+ """Initialize the weights"""
760
+
761
+ init_range = self.config.initializer_range
762
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
763
+ # Slightly different from the TF version which uses truncated_normal for initialization
764
+ # cf https://github.com/pytorch/pytorch/pull/5617
765
+ module.weight.data.normal_(mean=0.0, std=init_range)
766
+ if module.bias is not None:
767
+ module.bias.data.zero_()
768
+ elif isinstance(module, nn.LayerNorm):
769
+ module.bias.data.zero_()
770
+ module.weight.data.fill_(1.0)
771
+
772
+ factor = self.config.initializer_factor
773
+ if isinstance(module, GroupViTTextEmbeddings):
774
+ module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
775
+ module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
776
+ elif isinstance(module, GroupViTAttention):
777
+ factor = self.config.initializer_factor
778
+ in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
779
+ out_proj_std = (module.embed_dim**-0.5) * factor
780
+ nn.init.normal_(module.q_proj.weight, std=in_proj_std)
781
+ nn.init.normal_(module.k_proj.weight, std=in_proj_std)
782
+ nn.init.normal_(module.v_proj.weight, std=in_proj_std)
783
+ nn.init.normal_(module.out_proj.weight, std=out_proj_std)
784
+ elif isinstance(module, GroupViTMLP):
785
+ factor = self.config.initializer_factor
786
+ in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
787
+ fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
788
+ nn.init.normal_(module.fc1.weight, std=fc_std)
789
+ nn.init.normal_(module.fc2.weight, std=in_proj_std)
790
+
791
+
792
+ GROUPVIT_START_DOCSTRING = r"""
793
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
794
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
795
+ behavior.
796
+
797
+ Parameters:
798
+ config ([`GroupViTConfig`]): Model configuration class with all the parameters of the model.
799
+ Initializing with a config file does not load the weights associated with the model, only the
800
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
801
+ """
802
+
803
+ GROUPVIT_TEXT_INPUTS_DOCSTRING = r"""
804
+ Args:
805
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
806
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
807
+ it.
808
+
809
+ Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and
810
+ [`PreTrainedTokenizer.__call__`] for details.
811
+
812
+ [What are input IDs?](../glossary#input-ids)
813
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
814
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
815
+
816
+ - 1 for tokens that are **not masked**,
817
+ - 0 for tokens that are **masked**.
818
+
819
+ [What are attention masks?](../glossary#attention-mask)
820
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
821
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
822
+ config.max_position_embeddings - 1]`.
823
+
824
+ [What are position IDs?](../glossary#position-ids)
825
+ output_attentions (`bool`, *optional*):
826
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
827
+ tensors for more detail.
828
+ output_hidden_states (`bool`, *optional*):
829
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
830
+ more detail.
831
+ return_dict (`bool`, *optional*):
832
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
833
+ """
834
+
835
+ GROUPVIT_VISION_INPUTS_DOCSTRING = r"""
836
+ Args:
837
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
838
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
839
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
840
+ output_attentions (`bool`, *optional*):
841
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
842
+ tensors for more detail.
843
+ output_hidden_states (`bool`, *optional*):
844
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
845
+ more detail.
846
+ return_dict (`bool`, *optional*):
847
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
848
+ """
849
+
850
+ GROUPVIT_INPUTS_DOCSTRING = r"""
851
+ Args:
852
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
853
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
854
+ it.
855
+
856
+ Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and
857
+ [`PreTrainedTokenizer.__call__`] for details.
858
+
859
+ [What are input IDs?](../glossary#input-ids)
860
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
861
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
862
+
863
+ - 1 for tokens that are **not masked**,
864
+ - 0 for tokens that are **masked**.
865
+
866
+ [What are attention masks?](../glossary#attention-mask)
867
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
868
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
869
+ config.max_position_embeddings - 1]`.
870
+
871
+ [What are position IDs?](../glossary#position-ids)
872
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
873
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
874
+ [`CLIPImageProcessor.__call__`] for details.
875
+ return_loss (`bool`, *optional*):
876
+ Whether or not to return the contrastive loss.
877
+ output_attentions (`bool`, *optional*):
878
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
879
+ tensors for more detail.
880
+ output_hidden_states (`bool`, *optional*):
881
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
882
+ more detail.
883
+ return_dict (`bool`, *optional*):
884
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
885
+ """
886
+
887
+
888
+ class GroupViTVisionEncoder(nn.Module):
889
+ def __init__(self, config: GroupViTVisionConfig) -> None:
890
+ super().__init__()
891
+ self.config = config
892
+ self.stages = nn.ModuleList(
893
+ [
894
+ GroupViTStage(
895
+ config=config,
896
+ depth=config.depths[i],
897
+ num_group_token=config.num_group_tokens[i],
898
+ num_output_group=config.num_output_groups[i],
899
+ num_prev_group_token=config.num_output_groups[i - 1] if i > 0 else 0,
900
+ )
901
+ for i in range(len(config.depths))
902
+ ]
903
+ )
904
+ self.gradient_checkpointing = False
905
+
906
+ def forward(
907
+ self,
908
+ hidden_states: torch.Tensor,
909
+ output_hidden_states: Optional[bool] = None,
910
+ output_attentions: Optional[bool] = None,
911
+ return_dict: Optional[bool] = None,
912
+ ) -> Union[tuple, BaseModelOutput]:
913
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
914
+ output_hidden_states = (
915
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
916
+ )
917
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
918
+
919
+ all_hidden_states = () if output_hidden_states else None
920
+ all_groupings = () if output_attentions else None
921
+
922
+ group_tokens = None
923
+
924
+ for i, stage in enumerate(self.stages):
925
+ if output_hidden_states:
926
+ all_hidden_states = all_hidden_states + (hidden_states,)
927
+
928
+ layer_outputs = stage(hidden_states, group_tokens, output_attentions)
929
+
930
+ hidden_states = layer_outputs[0]
931
+ group_tokens = layer_outputs[1]
932
+
933
+ if output_attentions and layer_outputs[2] is not None:
934
+ all_groupings = all_groupings + (layer_outputs[2],)
935
+
936
+ if output_hidden_states:
937
+ all_hidden_states = all_hidden_states + (hidden_states,)
938
+
939
+ if not return_dict:
940
+ return tuple(v for v in [hidden_states, all_hidden_states, all_groupings] if v is not None)
941
+ return BaseModelOutput(
942
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_groupings
943
+ )
944
+
945
+
946
+ class GroupViTTextEncoder(nn.Module):
947
+ """
948
+ Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a
949
+ [`GroupViTEncoderLayer`].
950
+
951
+ Args:
952
+ config: GroupViTTextConfig
953
+ """
954
+
955
+ def __init__(self, config: GroupViTTextConfig):
956
+ super().__init__()
957
+ self.config = config
958
+ self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(config.num_hidden_layers)])
959
+ self.gradient_checkpointing = False
960
+
961
+ def forward(
962
+ self,
963
+ inputs_embeds,
964
+ attention_mask: Optional[torch.Tensor] = None,
965
+ causal_attention_mask: Optional[torch.Tensor] = None,
966
+ output_attentions: Optional[bool] = None,
967
+ output_hidden_states: Optional[bool] = None,
968
+ return_dict: Optional[bool] = None,
969
+ ) -> Union[Tuple, BaseModelOutput]:
970
+ r"""
971
+ Args:
972
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
973
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
974
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
975
+ than the model's internal embedding lookup matrix.
976
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
977
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
978
+
979
+ - 1 for tokens that are **not masked**,
980
+ - 0 for tokens that are **masked**.
981
+
982
+ [What are attention masks?](../glossary#attention-mask)
983
+ causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
984
+ Causal mask for the text model. Mask values selected in `[0, 1]`:
985
+
986
+ - 1 for tokens that are **not masked**,
987
+ - 0 for tokens that are **masked**.
988
+
989
+ [What are attention masks?](../glossary#attention-mask)
990
+ output_attentions (`bool`, *optional*):
991
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
992
+ returned tensors for more detail.
993
+ output_hidden_states (`bool`, *optional*):
994
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
995
+ for more detail.
996
+ return_dict (`bool`, *optional*):
997
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
998
+ """
999
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1000
+ output_hidden_states = (
1001
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1002
+ )
1003
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1004
+
1005
+ encoder_states = () if output_hidden_states else None
1006
+ all_attentions = () if output_attentions else None
1007
+
1008
+ hidden_states = inputs_embeds
1009
+ for idx, encoder_layer in enumerate(self.layers):
1010
+ if output_hidden_states:
1011
+ encoder_states = encoder_states + (hidden_states,)
1012
+ if self.gradient_checkpointing and self.training:
1013
+ layer_outputs = self._gradient_checkpointing_func(
1014
+ encoder_layer.__call__,
1015
+ hidden_states,
1016
+ attention_mask,
1017
+ causal_attention_mask,
1018
+ output_attentions,
1019
+ )
1020
+ else:
1021
+ layer_outputs = encoder_layer(
1022
+ hidden_states,
1023
+ attention_mask,
1024
+ causal_attention_mask,
1025
+ output_attentions=output_attentions,
1026
+ )
1027
+
1028
+ hidden_states = layer_outputs[0]
1029
+
1030
+ if output_attentions:
1031
+ all_attentions = all_attentions + (layer_outputs[1],)
1032
+
1033
+ if output_hidden_states:
1034
+ encoder_states = encoder_states + (hidden_states,)
1035
+
1036
+ if not return_dict:
1037
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
1038
+ return BaseModelOutput(
1039
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
1040
+ )
1041
+
1042
+
1043
+ # Copied from transformers.models.clip.modeling_clip.CLIPTextTransformer with CLIPText->GroupViTText, CLIPEncoder->GroupViTTextEncoder, CLIP_TEXT->GROUPVIT_TEXT
1044
+ class GroupViTTextTransformer(nn.Module):
1045
+ def __init__(self, config: GroupViTTextConfig):
1046
+ super().__init__()
1047
+ self.config = config
1048
+ embed_dim = config.hidden_size
1049
+ self.embeddings = GroupViTTextEmbeddings(config)
1050
+ self.encoder = GroupViTTextEncoder(config)
1051
+ self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
1052
+
1053
+ # For `pooled_output` computation
1054
+ self.eos_token_id = config.eos_token_id
1055
+
1056
+ @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING)
1057
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTTextConfig)
1058
+ def forward(
1059
+ self,
1060
+ input_ids: Optional[torch.Tensor] = None,
1061
+ attention_mask: Optional[torch.Tensor] = None,
1062
+ position_ids: Optional[torch.Tensor] = None,
1063
+ output_attentions: Optional[bool] = None,
1064
+ output_hidden_states: Optional[bool] = None,
1065
+ return_dict: Optional[bool] = None,
1066
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
1067
+ r"""
1068
+ Returns:
1069
+
1070
+ """
1071
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1072
+ output_hidden_states = (
1073
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1074
+ )
1075
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1076
+
1077
+ if input_ids is None:
1078
+ raise ValueError("You have to specify input_ids")
1079
+
1080
+ input_shape = input_ids.size()
1081
+ input_ids = input_ids.view(-1, input_shape[-1])
1082
+
1083
+ hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
1084
+
1085
+ # CLIP's text model uses causal mask, prepare it here.
1086
+ # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
1087
+ causal_attention_mask = _create_4d_causal_attention_mask(
1088
+ input_shape, hidden_states.dtype, device=hidden_states.device
1089
+ )
1090
+ # expand attention_mask
1091
+ if attention_mask is not None:
1092
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
1093
+ attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
1094
+
1095
+ encoder_outputs = self.encoder(
1096
+ inputs_embeds=hidden_states,
1097
+ attention_mask=attention_mask,
1098
+ causal_attention_mask=causal_attention_mask,
1099
+ output_attentions=output_attentions,
1100
+ output_hidden_states=output_hidden_states,
1101
+ return_dict=return_dict,
1102
+ )
1103
+
1104
+ last_hidden_state = encoder_outputs[0]
1105
+ last_hidden_state = self.final_layer_norm(last_hidden_state)
1106
+
1107
+ if self.eos_token_id == 2:
1108
+ # The `eos_token_id` was incorrect before PR #24773: let's keep what has been done here.
1109
+ # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added
1110
+ # ------------------------------------------------------------
1111
+ # text_embeds.shape = [batch_size, sequence_length, transformer.width]
1112
+ # take features from the eot embedding (eot_token is the highest number in each sequence)
1113
+ # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14
1114
+ pooled_output = last_hidden_state[
1115
+ torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
1116
+ input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),
1117
+ ]
1118
+ else:
1119
+ # The config gets updated `eos_token_id` from PR #24773 (so the use of extra new tokens is possible)
1120
+ pooled_output = last_hidden_state[
1121
+ torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
1122
+ # We need to get the first position of `eos_token_id` value (`pad_token_ids` might equal to `eos_token_id`)
1123
+ (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id)
1124
+ .int()
1125
+ .argmax(dim=-1),
1126
+ ]
1127
+
1128
+ if not return_dict:
1129
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
1130
+
1131
+ return BaseModelOutputWithPooling(
1132
+ last_hidden_state=last_hidden_state,
1133
+ pooler_output=pooled_output,
1134
+ hidden_states=encoder_outputs.hidden_states,
1135
+ attentions=encoder_outputs.attentions,
1136
+ )
1137
+
1138
+
1139
+ class GroupViTTextModel(GroupViTPreTrainedModel):
1140
+ config_class = GroupViTTextConfig
1141
+
1142
+ def __init__(self, config: GroupViTTextConfig):
1143
+ super().__init__(config)
1144
+ self.text_model = GroupViTTextTransformer(config)
1145
+ # Initialize weights and apply final processing
1146
+ self.post_init()
1147
+
1148
+ def get_input_embeddings(self) -> nn.Module:
1149
+ return self.text_model.embeddings.token_embedding
1150
+
1151
+ def set_input_embeddings(self, value):
1152
+ self.text_model.embeddings.token_embedding = value
1153
+
1154
+ @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING)
1155
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTTextConfig)
1156
+ def forward(
1157
+ self,
1158
+ input_ids: Optional[torch.Tensor] = None,
1159
+ attention_mask: Optional[torch.Tensor] = None,
1160
+ position_ids: Optional[torch.Tensor] = None,
1161
+ output_attentions: Optional[bool] = None,
1162
+ output_hidden_states: Optional[bool] = None,
1163
+ return_dict: Optional[bool] = None,
1164
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
1165
+ r"""
1166
+ Returns:
1167
+
1168
+ Examples:
1169
+
1170
+ ```python
1171
+ >>> from transformers import CLIPTokenizer, GroupViTTextModel
1172
+
1173
+ >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
1174
+ >>> model = GroupViTTextModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
1175
+
1176
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
1177
+
1178
+ >>> outputs = model(**inputs)
1179
+ >>> last_hidden_state = outputs.last_hidden_state
1180
+ >>> pooled_output = outputs.pooler_output # pooled (EOS token) states
1181
+ ```"""
1182
+ return self.text_model(
1183
+ input_ids=input_ids,
1184
+ attention_mask=attention_mask,
1185
+ position_ids=position_ids,
1186
+ output_attentions=output_attentions,
1187
+ output_hidden_states=output_hidden_states,
1188
+ return_dict=return_dict,
1189
+ )
1190
+
1191
+
1192
+ class GroupViTVisionTransformer(nn.Module):
1193
+ def __init__(self, config: GroupViTVisionConfig):
1194
+ super().__init__()
1195
+ self.config = config
1196
+ embed_dim = config.hidden_size
1197
+
1198
+ self.embeddings = GroupViTVisionEmbeddings(config)
1199
+ self.encoder = GroupViTVisionEncoder(config)
1200
+ self.layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
1201
+
1202
+ @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
1203
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTVisionConfig)
1204
+ def forward(
1205
+ self,
1206
+ pixel_values: Optional[torch.FloatTensor] = None,
1207
+ output_hidden_states: Optional[bool] = None,
1208
+ output_attentions: Optional[bool] = None,
1209
+ return_dict: Optional[bool] = None,
1210
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
1211
+ r"""
1212
+ Returns:
1213
+
1214
+ """
1215
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1216
+ output_hidden_states = (
1217
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1218
+ )
1219
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1220
+
1221
+ if pixel_values is None:
1222
+ raise ValueError("You have to specify pixel_values")
1223
+
1224
+ hidden_states = self.embeddings(pixel_values)
1225
+
1226
+ encoder_outputs = self.encoder(
1227
+ hidden_states=hidden_states,
1228
+ output_hidden_states=output_hidden_states,
1229
+ output_attentions=output_attentions,
1230
+ return_dict=return_dict,
1231
+ )
1232
+
1233
+ last_hidden_state = encoder_outputs[0]
1234
+
1235
+ # normalize the last hidden state
1236
+ last_hidden_state = self.layernorm(last_hidden_state)
1237
+ pooled_output = last_hidden_state.mean(dim=1)
1238
+
1239
+ if not return_dict:
1240
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
1241
+
1242
+ return BaseModelOutputWithPooling(
1243
+ last_hidden_state=last_hidden_state,
1244
+ pooler_output=pooled_output,
1245
+ hidden_states=encoder_outputs.hidden_states,
1246
+ attentions=encoder_outputs.attentions,
1247
+ )
1248
+
1249
+
1250
+ class GroupViTVisionModel(GroupViTPreTrainedModel):
1251
+ config_class = GroupViTVisionConfig
1252
+ main_input_name = "pixel_values"
1253
+
1254
+ def __init__(self, config: GroupViTVisionConfig):
1255
+ super().__init__(config)
1256
+ self.vision_model = GroupViTVisionTransformer(config)
1257
+ # Initialize weights and apply final processing
1258
+ self.post_init()
1259
+
1260
+ def get_input_embeddings(self) -> GroupViTPatchEmbeddings:
1261
+ return self.vision_model.embeddings.patch_embeddings
1262
+
1263
+ @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
1264
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTVisionConfig)
1265
+ def forward(
1266
+ self,
1267
+ pixel_values: Optional[torch.FloatTensor] = None,
1268
+ output_attentions: Optional[bool] = None,
1269
+ output_hidden_states: Optional[bool] = None,
1270
+ return_dict: Optional[bool] = None,
1271
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
1272
+ r"""
1273
+ Returns:
1274
+
1275
+ Examples:
1276
+
1277
+ ```python
1278
+ >>> from PIL import Image
1279
+ >>> import requests
1280
+ >>> from transformers import AutoProcessor, GroupViTVisionModel
1281
+
1282
+ >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
1283
+ >>> model = GroupViTVisionModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
1284
+
1285
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1286
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1287
+
1288
+ >>> inputs = processor(images=image, return_tensors="pt")
1289
+
1290
+ >>> outputs = model(**inputs)
1291
+ >>> last_hidden_state = outputs.last_hidden_state
1292
+ >>> pooled_output = outputs.pooler_output # pooled states (mean of the final group tokens)
1293
+ ```"""
1294
+ return self.vision_model(
1295
+ pixel_values=pixel_values,
1296
+ output_attentions=output_attentions,
1297
+ output_hidden_states=output_hidden_states,
1298
+ return_dict=return_dict,
1299
+ )
1300
+
1301
+
1302
+ @add_start_docstrings(GROUPVIT_START_DOCSTRING)
1303
+ class GroupViTModel(GroupViTPreTrainedModel):
1304
+ config_class = GroupViTConfig
1305
+
1306
+ def __init__(self, config: GroupViTConfig):
1307
+ super().__init__(config)
1308
+
1309
+ if not isinstance(config.text_config, GroupViTTextConfig):
1310
+ raise ValueError(
1311
+ "config.text_config is expected to be of type GroupViTTextConfig but is of type"
1312
+ f" {type(config.text_config)}."
1313
+ )
1314
+
1315
+ if not isinstance(config.vision_config, GroupViTVisionConfig):
1316
+ raise ValueError(
1317
+ "config.vision_config is expected to be of type GroupViTVisionConfig but is of type"
1318
+ f" {type(config.vision_config)}."
1319
+ )
1320
+
1321
+ text_config = config.text_config
1322
+ vision_config = config.vision_config
1323
+
1324
+ self.projection_dim = config.projection_dim
1325
+ self.projection_intermediate_dim = config.projection_intermediate_dim
1326
+ self.text_embed_dim = text_config.hidden_size
1327
+ self.vision_embed_dim = vision_config.hidden_size
1328
+
1329
+ self.text_model = GroupViTTextTransformer(text_config)
1330
+ self.vision_model = GroupViTVisionTransformer(vision_config)
1331
+
1332
+ self.visual_projection = nn.Sequential(
1333
+ nn.Linear(self.vision_embed_dim, self.projection_intermediate_dim, bias=True),
1334
+ nn.BatchNorm1d(self.projection_intermediate_dim),
1335
+ nn.ReLU(inplace=True),
1336
+ nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True),
1337
+ )
1338
+ self.text_projection = nn.Sequential(
1339
+ nn.Linear(self.text_embed_dim, self.projection_intermediate_dim, bias=True),
1340
+ nn.BatchNorm1d(self.projection_intermediate_dim),
1341
+ nn.ReLU(inplace=True),
1342
+ nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True),
1343
+ )
1344
+ self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
1345
+
1346
+ # Initialize weights and apply final processing
1347
+ self.post_init()
1348
+
1349
+ @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING)
1350
+ def get_text_features(
1351
+ self,
1352
+ input_ids: Optional[torch.Tensor] = None,
1353
+ attention_mask: Optional[torch.Tensor] = None,
1354
+ position_ids: Optional[torch.Tensor] = None,
1355
+ output_attentions: Optional[bool] = None,
1356
+ output_hidden_states: Optional[bool] = None,
1357
+ return_dict: Optional[bool] = None,
1358
+ ) -> torch.FloatTensor:
1359
+ r"""
1360
+ Returns:
1361
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
1362
+ applying the projection layer to the pooled output of [`GroupViTTextModel`].
1363
+
1364
+ Examples:
1365
+
1366
+ ```python
1367
+ >>> from transformers import CLIPTokenizer, GroupViTModel
1368
+
1369
+ >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
1370
+ >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
1371
+
1372
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
1373
+ >>> text_features = model.get_text_features(**inputs)
1374
+ ```"""
1375
+ # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components.
1376
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1377
+ output_hidden_states = (
1378
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1379
+ )
1380
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1381
+
1382
+ text_outputs = self.text_model(
1383
+ input_ids=input_ids,
1384
+ attention_mask=attention_mask,
1385
+ position_ids=position_ids,
1386
+ output_attentions=output_attentions,
1387
+ output_hidden_states=output_hidden_states,
1388
+ return_dict=return_dict,
1389
+ )
1390
+
1391
+ pooled_output = text_outputs[1]
1392
+ text_features = self.text_projection(pooled_output)
1393
+
1394
+ return text_features
1395
+
1396
+ @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
1397
+ def get_image_features(
1398
+ self,
1399
+ pixel_values: Optional[torch.FloatTensor] = None,
1400
+ output_attentions: Optional[bool] = None,
1401
+ output_hidden_states: Optional[bool] = None,
1402
+ return_dict: Optional[bool] = None,
1403
+ ) -> torch.FloatTensor:
1404
+ r"""
1405
+ Returns:
1406
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
1407
+ applying the projection layer to the pooled output of [`GroupViTVisionModel`].
1408
+
1409
+ Examples:
1410
+
1411
+ ```python
1412
+ >>> from PIL import Image
1413
+ >>> import requests
1414
+ >>> from transformers import AutoProcessor, GroupViTModel
1415
+
1416
+ >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
1417
+ >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
1418
+
1419
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1420
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1421
+
1422
+ >>> inputs = processor(images=image, return_tensors="pt")
1423
+
1424
+ >>> image_features = model.get_image_features(**inputs)
1425
+ ```"""
1426
+ # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components.
1427
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1428
+ output_hidden_states = (
1429
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1430
+ )
1431
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1432
+
1433
+ vision_outputs = self.vision_model(
1434
+ pixel_values=pixel_values,
1435
+ output_attentions=output_attentions,
1436
+ output_hidden_states=output_hidden_states,
1437
+ return_dict=return_dict,
1438
+ )
1439
+
1440
+ pooled_output = vision_outputs[1] # pooled_output
1441
+ image_features = self.visual_projection(pooled_output)
1442
+
1443
+ return image_features
1444
+
1445
+ @add_start_docstrings_to_model_forward(GROUPVIT_INPUTS_DOCSTRING)
1446
+ @replace_return_docstrings(output_type=GroupViTModelOutput, config_class=GroupViTConfig)
1447
+ def forward(
1448
+ self,
1449
+ input_ids: Optional[torch.LongTensor] = None,
1450
+ pixel_values: Optional[torch.FloatTensor] = None,
1451
+ attention_mask: Optional[torch.Tensor] = None,
1452
+ position_ids: Optional[torch.LongTensor] = None,
1453
+ return_loss: Optional[bool] = None,
1454
+ output_attentions: Optional[bool] = None,
1455
+ output_hidden_states: Optional[bool] = None,
1456
+ output_segmentation: Optional[bool] = None,
1457
+ return_dict: Optional[bool] = None,
1458
+ ) -> Union[Tuple, GroupViTModelOutput]:
1459
+ r"""
1460
+ Returns:
1461
+
1462
+ Examples:
1463
+
1464
+ ```python
1465
+ >>> from PIL import Image
1466
+ >>> import requests
1467
+ >>> from transformers import AutoProcessor, GroupViTModel
1468
+
1469
+ >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
1470
+ >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
1471
+
1472
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1473
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1474
+
1475
+ >>> inputs = processor(
1476
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
1477
+ ... )
1478
+
1479
+ >>> outputs = model(**inputs)
1480
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
1481
+ >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
1482
+ ```"""
1483
+ # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components.
1484
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1485
+ output_segmentation = (
1486
+ output_segmentation if output_segmentation is not None else self.config.output_segmentation
1487
+ )
1488
+ if output_segmentation:
1489
+ output_attentions = True
1490
+ output_hidden_states = (
1491
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1492
+ )
1493
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1494
+
1495
+ vision_outputs = self.vision_model(
1496
+ pixel_values=pixel_values,
1497
+ output_attentions=output_attentions,
1498
+ output_hidden_states=output_hidden_states,
1499
+ return_dict=return_dict,
1500
+ )
1501
+
1502
+ text_outputs = self.text_model(
1503
+ input_ids=input_ids,
1504
+ attention_mask=attention_mask,
1505
+ position_ids=position_ids,
1506
+ output_attentions=output_attentions,
1507
+ output_hidden_states=output_hidden_states,
1508
+ return_dict=return_dict,
1509
+ )
1510
+
1511
+ image_embeds = vision_outputs[1]
1512
+ image_embeds = self.visual_projection(image_embeds)
1513
+
1514
+ text_embeds = text_outputs[1]
1515
+ text_embeds = self.text_projection(text_embeds)
1516
+
1517
+ # normalized features
1518
+ image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True)
1519
+ text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)
1520
+
1521
+ # cosine similarity as logits
1522
+ logit_scale = self.logit_scale.exp()
1523
+ logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
1524
+ logits_per_image = logits_per_text.t()
1525
+
1526
+ seg_logits = None
1527
+ if output_segmentation:
1528
+ # grouped features
1529
+ # [batch_size_image, num_group, hidden_size]
1530
+ image_group_embeds = vision_outputs[0]
1531
+ # [batch_size_image*num_group, hidden_size]
1532
+ image_group_embeds = self.visual_projection(image_group_embeds.reshape(-1, image_group_embeds.shape[-1]))
1533
+ if output_hidden_states:
1534
+ attentions = vision_outputs[3]
1535
+ else:
1536
+ attentions = vision_outputs[2]
1537
+ # [batch_size_image, num_group, height, width]
1538
+ grouping = get_grouping_from_attentions(attentions, pixel_values.shape[2:])
1539
+
1540
+ # normalized features
1541
+ image_group_embeds = image_group_embeds / image_group_embeds.norm(dim=-1, keepdim=True)
1542
+ # [batch_size_image x num_group, batch_size_text]
1543
+ logits_per_image_group = torch.matmul(image_group_embeds, text_embeds.t()) * logit_scale
1544
+ # [batch_size_image, batch_size_text, num_group]
1545
+ logits_per_image_group = logits_per_image_group.reshape(
1546
+ image_embeds.shape[0], -1, text_embeds.shape[0]
1547
+ ).permute(0, 2, 1)
1548
+
1549
+ # [batch_size_image, batch_size_text, height x width]
1550
+ flatten_grouping = grouping.reshape(grouping.shape[0], grouping.shape[1], -1)
1551
+
1552
+ # [batch_size_image, batch_size_text, height, width]
1553
+ seg_logits = torch.matmul(logits_per_image_group, flatten_grouping) * logit_scale
1554
+ seg_logits = seg_logits.reshape(
1555
+ seg_logits.shape[0], seg_logits.shape[1], grouping.shape[2], grouping.shape[3]
1556
+ )
1557
+
1558
+ loss = None
1559
+ if return_loss:
1560
+ loss = groupvit_loss(logits_per_text)
1561
+
1562
+ if not return_dict:
1563
+ if seg_logits is not None:
1564
+ output = (
1565
+ logits_per_image,
1566
+ logits_per_text,
1567
+ seg_logits,
1568
+ text_embeds,
1569
+ image_embeds,
1570
+ text_outputs,
1571
+ vision_outputs,
1572
+ )
1573
+ else:
1574
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
1575
+ return ((loss,) + output) if loss is not None else output
1576
+
1577
+ return GroupViTModelOutput(
1578
+ loss=loss,
1579
+ logits_per_image=logits_per_image,
1580
+ logits_per_text=logits_per_text,
1581
+ segmentation_logits=seg_logits,
1582
+ text_embeds=text_embeds,
1583
+ image_embeds=image_embeds,
1584
+ text_model_output=text_outputs,
1585
+ vision_model_output=vision_outputs,
1586
+ )
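The forward docstring above only exercises the image-text similarity path; the segmentation branch is reached by passing `output_segmentation=True`. A minimal sketch, reusing the `model`, `processor`, and `inputs` from the docstring example above (the per-pixel argmax decoding is an illustrative assumption, not part of the file):

```python
import torch

# Reuses `model`, `processor`, and `inputs` from the docstring example above.
with torch.no_grad():
    outputs = model(**inputs, output_segmentation=True)

# segmentation_logits: (num_images, num_text_prompts, height, width), per the reshape in the branch above.
seg_logits = outputs.segmentation_logits
per_pixel_label = seg_logits.argmax(dim=1)  # index of the best-matching text prompt for each pixel
```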
env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__init__.py ADDED
@@ -0,0 +1,145 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+ )
+
+
+ _import_structure = {
+ "configuration_mobilebert": [
+ "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "MobileBertConfig",
+ "MobileBertOnnxConfig",
+ ],
+ "tokenization_mobilebert": ["MobileBertTokenizer"],
+ }
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ _import_structure["modeling_mobilebert"] = [
+ "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "MobileBertForMaskedLM",
+ "MobileBertForMultipleChoice",
+ "MobileBertForNextSentencePrediction",
+ "MobileBertForPreTraining",
+ "MobileBertForQuestionAnswering",
+ "MobileBertForSequenceClassification",
+ "MobileBertForTokenClassification",
+ "MobileBertLayer",
+ "MobileBertModel",
+ "MobileBertPreTrainedModel",
+ "load_tf_weights_in_mobilebert",
+ ]
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ _import_structure["modeling_tf_mobilebert"] = [
+ "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFMobileBertForMaskedLM",
+ "TFMobileBertForMultipleChoice",
+ "TFMobileBertForNextSentencePrediction",
+ "TFMobileBertForPreTraining",
+ "TFMobileBertForQuestionAnswering",
+ "TFMobileBertForSequenceClassification",
+ "TFMobileBertForTokenClassification",
+ "TFMobileBertMainLayer",
+ "TFMobileBertModel",
+ "TFMobileBertPreTrainedModel",
+ ]
+
+
+ if TYPE_CHECKING:
+ from .configuration_mobilebert import (
+ MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ MobileBertConfig,
+ MobileBertOnnxConfig,
+ )
+ from .tokenization_mobilebert import MobileBertTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_mobilebert_fast import MobileBertTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_mobilebert import (
+ MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ MobileBertForMaskedLM,
+ MobileBertForMultipleChoice,
+ MobileBertForNextSentencePrediction,
+ MobileBertForPreTraining,
+ MobileBertForQuestionAnswering,
+ MobileBertForSequenceClassification,
+ MobileBertForTokenClassification,
+ MobileBertLayer,
+ MobileBertModel,
+ MobileBertPreTrainedModel,
+ load_tf_weights_in_mobilebert,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_mobilebert import (
+ TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFMobileBertForMaskedLM,
+ TFMobileBertForMultipleChoice,
+ TFMobileBertForNextSentencePrediction,
+ TFMobileBertForPreTraining,
+ TFMobileBertForQuestionAnswering,
+ TFMobileBertForSequenceClassification,
+ TFMobileBertForTokenClassification,
+ TFMobileBertMainLayer,
+ TFMobileBertModel,
+ TFMobileBertPreTrainedModel,
+ )
+
+ else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
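For reference, a minimal sketch (not part of the file above) of what the `_LazyModule` registration buys: submodule imports are deferred until an attribute is actually accessed, and optional backends only fail at that point.

```python
from transformers.models import mobilebert

# Nothing heavy has been imported yet; attribute access triggers the real import
# through the _import_structure mapping defined above.
config_cls = mobilebert.MobileBertConfig  # loads configuration_mobilebert
model_cls = mobilebert.MobileBertModel    # loads modeling_mobilebert (requires torch to be installed)
```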
env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.22 kB).

env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/configuration_mobilebert.cpython-310.pyc ADDED
Binary file (7.36 kB).

env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/convert_mobilebert_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.43 kB).

env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/modeling_mobilebert.cpython-310.pyc ADDED
Binary file (48.4 kB).

env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/modeling_tf_mobilebert.cpython-310.pyc ADDED
Binary file (56.3 kB).

env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/tokenization_mobilebert.cpython-310.pyc ADDED
Binary file (17.4 kB).

env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/tokenization_mobilebert_fast.cpython-310.pyc ADDED
Binary file (7.27 kB).
env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/configuration_mobilebert.py ADDED
@@ -0,0 +1,188 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ MobileBERT model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...onnx import OnnxConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+ MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
27
+ "google/mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/config.json"
28
+ }
29
+
30
+
31
+ class MobileBertConfig(PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of a [`MobileBertModel`] or a [`TFMobileBertModel`]. It
34
+ is used to instantiate a MobileBERT model according to the specified arguments, defining the model architecture.
35
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the MobileBERT
36
+ [google/mobilebert-uncased](https://huggingface.co/google/mobilebert-uncased) architecture.
37
+
38
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
39
+ documentation from [`PretrainedConfig`] for more information.
40
+
41
+
42
+ Args:
43
+ vocab_size (`int`, *optional*, defaults to 30522):
44
+ Vocabulary size of the MobileBERT model. Defines the number of different tokens that can be represented by
45
+ the `inputs_ids` passed when calling [`MobileBertModel`] or [`TFMobileBertModel`].
46
+ hidden_size (`int`, *optional*, defaults to 512):
47
+ Dimensionality of the encoder layers and the pooler layer.
48
+ num_hidden_layers (`int`, *optional*, defaults to 24):
49
+ Number of hidden layers in the Transformer encoder.
50
+ num_attention_heads (`int`, *optional*, defaults to 4):
51
+ Number of attention heads for each attention layer in the Transformer encoder.
52
+ intermediate_size (`int`, *optional*, defaults to 512):
53
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
54
+ hidden_act (`str` or `function`, *optional*, defaults to `"relu"`):
55
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
56
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
57
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
58
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
59
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
60
+ The dropout ratio for the attention probabilities.
61
+ max_position_embeddings (`int`, *optional*, defaults to 512):
62
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
63
+ just in case (e.g., 512 or 1024 or 2048).
64
+ type_vocab_size (`int`, *optional*, defaults to 2):
65
+ The vocabulary size of the `token_type_ids` passed when calling [`MobileBertModel`] or
66
+ [`TFMobileBertModel`].
67
+ initializer_range (`float`, *optional*, defaults to 0.02):
68
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
69
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
70
+ The epsilon used by the layer normalization layers.
71
+
72
+ pad_token_id (`int`, *optional*, defaults to 0):
73
+ The ID of the token in the word embedding to use as padding.
74
+ embedding_size (`int`, *optional*, defaults to 128):
75
+ The dimension of the word embedding vectors.
76
+ trigram_input (`bool`, *optional*, defaults to `True`):
77
+ Use a convolution of trigram as input.
78
+ use_bottleneck (`bool`, *optional*, defaults to `True`):
79
+ Whether to use bottleneck in BERT.
80
+ intra_bottleneck_size (`int`, *optional*, defaults to 128):
81
+ Size of bottleneck layer output.
82
+ use_bottleneck_attention (`bool`, *optional*, defaults to `False`):
83
+ Whether to use attention inputs from the bottleneck transformation.
84
+ key_query_shared_bottleneck (`bool`, *optional*, defaults to `True`):
85
+ Whether to use the same linear transformation for query&key in the bottleneck.
86
+ num_feedforward_networks (`int`, *optional*, defaults to 4):
87
+ Number of FFNs in a block.
88
+ normalization_type (`str`, *optional*, defaults to `"no_norm"`):
89
+ The normalization type in MobileBERT.
90
+ classifier_dropout (`float`, *optional*):
91
+ The dropout ratio for the classification head.
92
+
93
+ Examples:
94
+
95
+ ```python
96
+ >>> from transformers import MobileBertConfig, MobileBertModel
97
+
98
+ >>> # Initializing a MobileBERT configuration
99
+ >>> configuration = MobileBertConfig()
100
+
101
+ >>> # Initializing a model (with random weights) from the configuration above
102
+ >>> model = MobileBertModel(configuration)
103
+
104
+ >>> # Accessing the model configuration
105
+ >>> configuration = model.config
106
+ ```
107
+
108
+ Attributes: pretrained_config_archive_map (Dict[str, str]): A dictionary containing all the available pre-trained
109
+ checkpoints.
110
+ """
111
+
112
+ pretrained_config_archive_map = MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
113
+ model_type = "mobilebert"
114
+
115
+ def __init__(
116
+ self,
117
+ vocab_size=30522,
118
+ hidden_size=512,
119
+ num_hidden_layers=24,
120
+ num_attention_heads=4,
121
+ intermediate_size=512,
122
+ hidden_act="relu",
123
+ hidden_dropout_prob=0.0,
124
+ attention_probs_dropout_prob=0.1,
125
+ max_position_embeddings=512,
126
+ type_vocab_size=2,
127
+ initializer_range=0.02,
128
+ layer_norm_eps=1e-12,
129
+ pad_token_id=0,
130
+ embedding_size=128,
131
+ trigram_input=True,
132
+ use_bottleneck=True,
133
+ intra_bottleneck_size=128,
134
+ use_bottleneck_attention=False,
135
+ key_query_shared_bottleneck=True,
136
+ num_feedforward_networks=4,
137
+ normalization_type="no_norm",
138
+ classifier_activation=True,
139
+ classifier_dropout=None,
140
+ **kwargs,
141
+ ):
142
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
143
+
144
+ self.vocab_size = vocab_size
145
+ self.hidden_size = hidden_size
146
+ self.num_hidden_layers = num_hidden_layers
147
+ self.num_attention_heads = num_attention_heads
148
+ self.hidden_act = hidden_act
149
+ self.intermediate_size = intermediate_size
150
+ self.hidden_dropout_prob = hidden_dropout_prob
151
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
152
+ self.max_position_embeddings = max_position_embeddings
153
+ self.type_vocab_size = type_vocab_size
154
+ self.initializer_range = initializer_range
155
+ self.layer_norm_eps = layer_norm_eps
156
+ self.embedding_size = embedding_size
157
+ self.trigram_input = trigram_input
158
+ self.use_bottleneck = use_bottleneck
159
+ self.intra_bottleneck_size = intra_bottleneck_size
160
+ self.use_bottleneck_attention = use_bottleneck_attention
161
+ self.key_query_shared_bottleneck = key_query_shared_bottleneck
162
+ self.num_feedforward_networks = num_feedforward_networks
163
+ self.normalization_type = normalization_type
164
+ self.classifier_activation = classifier_activation
165
+
166
+ if self.use_bottleneck:
167
+ self.true_hidden_size = intra_bottleneck_size
168
+ else:
169
+ self.true_hidden_size = hidden_size
170
+
171
+ self.classifier_dropout = classifier_dropout
172
+
173
+
174
+ # Copied from transformers.models.bert.configuration_bert.BertOnnxConfig with Bert->MobileBert
175
+ class MobileBertOnnxConfig(OnnxConfig):
176
+ @property
177
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
178
+ if self.task == "multiple-choice":
179
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
180
+ else:
181
+ dynamic_axis = {0: "batch", 1: "sequence"}
182
+ return OrderedDict(
183
+ [
184
+ ("input_ids", dynamic_axis),
185
+ ("attention_mask", dynamic_axis),
186
+ ("token_type_ids", dynamic_axis),
187
+ ]
188
+ )
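A minimal sketch (not part of the file above) of how the bottleneck settings in `MobileBertConfig.__init__` interact; the values follow the defaults shown in the file:

```python
from transformers import MobileBertConfig

cfg = MobileBertConfig()  # defaults: use_bottleneck=True, intra_bottleneck_size=128
assert cfg.true_hidden_size == cfg.intra_bottleneck_size  # width used inside the attention/FFN blocks

cfg_no_bottleneck = MobileBertConfig(use_bottleneck=False)
assert cfg_no_bottleneck.true_hidden_size == cfg_no_bottleneck.hidden_size  # falls back to hidden_size (512)
```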
env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,58 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import argparse
+
+ import torch
+
+ from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
+ from transformers.utils import logging
+
+
+ logging.set_verbosity_info()
+
+
+ def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
+ # Initialise PyTorch model
+ config = MobileBertConfig.from_json_file(mobilebert_config_file)
+ print(f"Building PyTorch model from configuration: {config}")
+ model = MobileBertForPreTraining(config)
+ # Load weights from tf checkpoint
+ model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
+ # Save pytorch-model
+ print(f"Save PyTorch model to {pytorch_dump_path}")
+ torch.save(model.state_dict(), pytorch_dump_path)
+
+
+ if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
+ )
+ parser.add_argument(
+ "--mobilebert_config_file",
+ default=None,
+ type=str,
+ required=True,
+ help=(
+ "The config json file corresponding to the pre-trained MobileBERT model. \n"
+ "This specifies the model architecture."
+ ),
+ )
+ parser.add_argument(
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ args = parser.parse_args()
+ convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
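A minimal usage sketch for the converter above, calling the function directly instead of the CLI; the checkpoint, config, and output paths are hypothetical placeholders:

```python
from transformers.models.mobilebert.convert_mobilebert_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

# Hypothetical local paths to a TF MobileBERT checkpoint, its config JSON, and the output file.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="mobilebert/mobilebert_variables.ckpt",
    mobilebert_config_file="mobilebert/config.json",
    pytorch_dump_path="mobilebert/pytorch_model.bin",
)
```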
env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/modeling_mobilebert.py ADDED
@@ -0,0 +1,1617 @@
1
+ # MIT License
2
+ #
3
+ # Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient
4
+ #
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+ #
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+ #
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ import math
24
+ import os
25
+ import warnings
26
+ from dataclasses import dataclass
27
+ from typing import Optional, Tuple, Union
28
+
29
+ import torch
30
+ from torch import nn
31
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
32
+
33
+ from ...activations import ACT2FN
34
+ from ...modeling_outputs import (
35
+ BaseModelOutput,
36
+ BaseModelOutputWithPooling,
37
+ MaskedLMOutput,
38
+ MultipleChoiceModelOutput,
39
+ NextSentencePredictorOutput,
40
+ QuestionAnsweringModelOutput,
41
+ SequenceClassifierOutput,
42
+ TokenClassifierOutput,
43
+ )
44
+ from ...modeling_utils import PreTrainedModel
45
+ from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
46
+ from ...utils import (
47
+ ModelOutput,
48
+ add_code_sample_docstrings,
49
+ add_start_docstrings,
50
+ add_start_docstrings_to_model_forward,
51
+ logging,
52
+ replace_return_docstrings,
53
+ )
54
+ from .configuration_mobilebert import MobileBertConfig
55
+
56
+
57
+ logger = logging.get_logger(__name__)
58
+
59
+ _CHECKPOINT_FOR_DOC = "google/mobilebert-uncased"
60
+ _CONFIG_FOR_DOC = "MobileBertConfig"
61
+
62
+ # TokenClassification docstring
63
+ _CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "mrm8488/mobilebert-finetuned-ner"
64
+ _TOKEN_CLASS_EXPECTED_OUTPUT = "['I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC']"
65
+ _TOKEN_CLASS_EXPECTED_LOSS = 0.03
66
+
67
+ # QuestionAnswering docstring
68
+ _CHECKPOINT_FOR_QA = "csarron/mobilebert-uncased-squad-v2"
69
+ _QA_EXPECTED_OUTPUT = "'a nice puppet'"
70
+ _QA_EXPECTED_LOSS = 3.98
71
+ _QA_TARGET_START_INDEX = 12
72
+ _QA_TARGET_END_INDEX = 13
73
+
74
+ # SequenceClassification docstring
75
+ _CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "lordtt13/emo-mobilebert"
76
+ _SEQ_CLASS_EXPECTED_OUTPUT = "'others'"
77
+ _SEQ_CLASS_EXPECTED_LOSS = "4.72"
78
+
79
+ MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = ["google/mobilebert-uncased"]
80
+
81
+
82
+ def load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path):
83
+ """Load tf checkpoints in a pytorch model."""
84
+ try:
85
+ import re
86
+
87
+ import numpy as np
88
+ import tensorflow as tf
89
+ except ImportError:
90
+ logger.error(
91
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
92
+ "https://www.tensorflow.org/install/ for installation instructions."
93
+ )
94
+ raise
95
+ tf_path = os.path.abspath(tf_checkpoint_path)
96
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
97
+ # Load weights from TF model
98
+ init_vars = tf.train.list_variables(tf_path)
99
+ names = []
100
+ arrays = []
101
+ for name, shape in init_vars:
102
+ logger.info(f"Loading TF weight {name} with shape {shape}")
103
+ array = tf.train.load_variable(tf_path, name)
104
+ names.append(name)
105
+ arrays.append(array)
106
+
107
+ for name, array in zip(names, arrays):
108
+ name = name.replace("ffn_layer", "ffn")
109
+ name = name.replace("FakeLayerNorm", "LayerNorm")
110
+ name = name.replace("extra_output_weights", "dense/kernel")
111
+ name = name.replace("bert", "mobilebert")
112
+ name = name.split("/")
113
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
114
+ # which are not required for using pretrained model
115
+ if any(
116
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
117
+ for n in name
118
+ ):
119
+ logger.info(f"Skipping {'/'.join(name)}")
120
+ continue
121
+ pointer = model
122
+ for m_name in name:
123
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
124
+ scope_names = re.split(r"_(\d+)", m_name)
125
+ else:
126
+ scope_names = [m_name]
127
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
128
+ pointer = getattr(pointer, "weight")
129
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
130
+ pointer = getattr(pointer, "bias")
131
+ elif scope_names[0] == "output_weights":
132
+ pointer = getattr(pointer, "weight")
133
+ elif scope_names[0] == "squad":
134
+ pointer = getattr(pointer, "classifier")
135
+ else:
136
+ try:
137
+ pointer = getattr(pointer, scope_names[0])
138
+ except AttributeError:
139
+ logger.info(f"Skipping {'/'.join(name)}")
140
+ continue
141
+ if len(scope_names) >= 2:
142
+ num = int(scope_names[1])
143
+ pointer = pointer[num]
144
+ if m_name[-11:] == "_embeddings":
145
+ pointer = getattr(pointer, "weight")
146
+ elif m_name == "kernel":
147
+ array = np.transpose(array)
148
+ try:
149
+ assert (
150
+ pointer.shape == array.shape
151
+ ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
152
+ except AssertionError as e:
153
+ e.args += (pointer.shape, array.shape)
154
+ raise
155
+ logger.info(f"Initialize PyTorch weight {name}")
156
+ pointer.data = torch.from_numpy(array)
157
+ return model
158
+
159
+
160
+ class NoNorm(nn.Module):
161
+ def __init__(self, feat_size, eps=None):
162
+ super().__init__()
163
+ self.bias = nn.Parameter(torch.zeros(feat_size))
164
+ self.weight = nn.Parameter(torch.ones(feat_size))
165
+
166
+ def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
167
+ return input_tensor * self.weight + self.bias
168
+
169
+
170
+ NORM2FN = {"layer_norm": nn.LayerNorm, "no_norm": NoNorm}
171
+
172
+
173
+ class MobileBertEmbeddings(nn.Module):
174
+ """Construct the embeddings from word, position and token_type embeddings."""
175
+
176
+ def __init__(self, config):
177
+ super().__init__()
178
+ self.trigram_input = config.trigram_input
179
+ self.embedding_size = config.embedding_size
180
+ self.hidden_size = config.hidden_size
181
+
182
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
183
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
184
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
185
+
186
+ embed_dim_multiplier = 3 if self.trigram_input else 1
187
+ embedded_input_size = self.embedding_size * embed_dim_multiplier
188
+ self.embedding_transformation = nn.Linear(embedded_input_size, config.hidden_size)
189
+
190
+ self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size)
191
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
192
+
193
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
194
+ self.register_buffer(
195
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
196
+ )
197
+
198
+ def forward(
199
+ self,
200
+ input_ids: Optional[torch.LongTensor] = None,
201
+ token_type_ids: Optional[torch.LongTensor] = None,
202
+ position_ids: Optional[torch.LongTensor] = None,
203
+ inputs_embeds: Optional[torch.FloatTensor] = None,
204
+ ) -> torch.Tensor:
205
+ if input_ids is not None:
206
+ input_shape = input_ids.size()
207
+ else:
208
+ input_shape = inputs_embeds.size()[:-1]
209
+
210
+ seq_length = input_shape[1]
211
+
212
+ if position_ids is None:
213
+ position_ids = self.position_ids[:, :seq_length]
214
+
215
+ if token_type_ids is None:
216
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
217
+ if inputs_embeds is None:
218
+ inputs_embeds = self.word_embeddings(input_ids)
219
+
220
+ if self.trigram_input:
221
+ # From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited
222
+ # Devices (https://arxiv.org/abs/2004.02984)
223
+ #
224
+ # The embedding table in BERT models accounts for a substantial proportion of model size. To compress
225
+ # the embedding layer, we reduce the embedding dimension to 128 in MobileBERT.
226
+ # Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512
227
+ # dimensional output.
228
+ inputs_embeds = torch.cat(
229
+ [
230
+ nn.functional.pad(inputs_embeds[:, 1:], [0, 0, 0, 1, 0, 0], value=0.0),
231
+ inputs_embeds,
232
+ nn.functional.pad(inputs_embeds[:, :-1], [0, 0, 1, 0, 0, 0], value=0.0),
233
+ ],
234
+ dim=2,
235
+ )
236
+ if self.trigram_input or self.embedding_size != self.hidden_size:
237
+ inputs_embeds = self.embedding_transformation(inputs_embeds)
238
+
239
+ # Add positional embeddings and token type embeddings, then layer
240
+ # normalize and perform dropout.
241
+ position_embeddings = self.position_embeddings(position_ids)
242
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
243
+ embeddings = inputs_embeds + position_embeddings + token_type_embeddings
244
+ embeddings = self.LayerNorm(embeddings)
245
+ embeddings = self.dropout(embeddings)
246
+ return embeddings
247
+
248
+
249
+ class MobileBertSelfAttention(nn.Module):
250
+ def __init__(self, config):
251
+ super().__init__()
252
+ self.num_attention_heads = config.num_attention_heads
253
+ self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads)
254
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
255
+
256
+ self.query = nn.Linear(config.true_hidden_size, self.all_head_size)
257
+ self.key = nn.Linear(config.true_hidden_size, self.all_head_size)
258
+ self.value = nn.Linear(
259
+ config.true_hidden_size if config.use_bottleneck_attention else config.hidden_size, self.all_head_size
260
+ )
261
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
262
+
263
+ def transpose_for_scores(self, x):
264
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
265
+ x = x.view(new_x_shape)
266
+ return x.permute(0, 2, 1, 3)
267
+
268
+ def forward(
269
+ self,
270
+ query_tensor: torch.Tensor,
271
+ key_tensor: torch.Tensor,
272
+ value_tensor: torch.Tensor,
273
+ attention_mask: Optional[torch.FloatTensor] = None,
274
+ head_mask: Optional[torch.FloatTensor] = None,
275
+ output_attentions: Optional[bool] = None,
276
+ ) -> Tuple[torch.Tensor]:
277
+ mixed_query_layer = self.query(query_tensor)
278
+ mixed_key_layer = self.key(key_tensor)
279
+ mixed_value_layer = self.value(value_tensor)
280
+
281
+ query_layer = self.transpose_for_scores(mixed_query_layer)
282
+ key_layer = self.transpose_for_scores(mixed_key_layer)
283
+ value_layer = self.transpose_for_scores(mixed_value_layer)
284
+
285
+ # Take the dot product between "query" and "key" to get the raw attention scores.
286
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
287
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
288
+ if attention_mask is not None:
289
+ # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
290
+ attention_scores = attention_scores + attention_mask
291
+ # Normalize the attention scores to probabilities.
292
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
293
+ # This is actually dropping out entire tokens to attend to, which might
294
+ # seem a bit unusual, but is taken from the original Transformer paper.
295
+ attention_probs = self.dropout(attention_probs)
296
+ # Mask heads if we want to
297
+ if head_mask is not None:
298
+ attention_probs = attention_probs * head_mask
299
+ context_layer = torch.matmul(attention_probs, value_layer)
300
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
301
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
302
+ context_layer = context_layer.view(new_context_layer_shape)
303
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
304
+ return outputs
305
+
306
+
307
+ class MobileBertSelfOutput(nn.Module):
308
+ def __init__(self, config):
309
+ super().__init__()
310
+ self.use_bottleneck = config.use_bottleneck
311
+ self.dense = nn.Linear(config.true_hidden_size, config.true_hidden_size)
312
+ self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)
313
+ if not self.use_bottleneck:
314
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
315
+
316
+ def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
317
+ layer_outputs = self.dense(hidden_states)
318
+ if not self.use_bottleneck:
319
+ layer_outputs = self.dropout(layer_outputs)
320
+ layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
321
+ return layer_outputs
322
+
323
+
324
+ class MobileBertAttention(nn.Module):
325
+ def __init__(self, config):
326
+ super().__init__()
327
+ self.self = MobileBertSelfAttention(config)
328
+ self.output = MobileBertSelfOutput(config)
329
+ self.pruned_heads = set()
330
+
331
+ def prune_heads(self, heads):
332
+ if len(heads) == 0:
333
+ return
334
+ heads, index = find_pruneable_heads_and_indices(
335
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
336
+ )
337
+
338
+ # Prune linear layers
339
+ self.self.query = prune_linear_layer(self.self.query, index)
340
+ self.self.key = prune_linear_layer(self.self.key, index)
341
+ self.self.value = prune_linear_layer(self.self.value, index)
342
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
343
+
344
+ # Update hyper params and store pruned heads
345
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
346
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
347
+ self.pruned_heads = self.pruned_heads.union(heads)
348
+
349
+ def forward(
350
+ self,
351
+ query_tensor: torch.Tensor,
352
+ key_tensor: torch.Tensor,
353
+ value_tensor: torch.Tensor,
354
+ layer_input: torch.Tensor,
355
+ attention_mask: Optional[torch.FloatTensor] = None,
356
+ head_mask: Optional[torch.FloatTensor] = None,
357
+ output_attentions: Optional[bool] = None,
358
+ ) -> Tuple[torch.Tensor]:
359
+ self_outputs = self.self(
360
+ query_tensor,
361
+ key_tensor,
362
+ value_tensor,
363
+ attention_mask,
364
+ head_mask,
365
+ output_attentions,
366
+ )
367
+ # Run a linear projection of `hidden_size` then add a residual
368
+ # with `layer_input`.
369
+ attention_output = self.output(self_outputs[0], layer_input)
370
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
371
+ return outputs
372
+
373
+
374
+ class MobileBertIntermediate(nn.Module):
375
+ def __init__(self, config):
376
+ super().__init__()
377
+ self.dense = nn.Linear(config.true_hidden_size, config.intermediate_size)
378
+ if isinstance(config.hidden_act, str):
379
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
380
+ else:
381
+ self.intermediate_act_fn = config.hidden_act
382
+
383
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
384
+ hidden_states = self.dense(hidden_states)
385
+ hidden_states = self.intermediate_act_fn(hidden_states)
386
+ return hidden_states
387
+
388
+
389
+ class OutputBottleneck(nn.Module):
390
+ def __init__(self, config):
391
+ super().__init__()
392
+ self.dense = nn.Linear(config.true_hidden_size, config.hidden_size)
393
+ self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size, eps=config.layer_norm_eps)
394
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
395
+
396
+ def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
397
+ layer_outputs = self.dense(hidden_states)
398
+ layer_outputs = self.dropout(layer_outputs)
399
+ layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
400
+ return layer_outputs
401
+
402
+
403
+ class MobileBertOutput(nn.Module):
404
+ def __init__(self, config):
405
+ super().__init__()
406
+ self.use_bottleneck = config.use_bottleneck
407
+ self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)
408
+ self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size)
409
+ if not self.use_bottleneck:
410
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
411
+ else:
412
+ self.bottleneck = OutputBottleneck(config)
413
+
414
+ def forward(
415
+ self, intermediate_states: torch.Tensor, residual_tensor_1: torch.Tensor, residual_tensor_2: torch.Tensor
416
+ ) -> torch.Tensor:
417
+ layer_output = self.dense(intermediate_states)
418
+ if not self.use_bottleneck:
419
+ layer_output = self.dropout(layer_output)
420
+ layer_output = self.LayerNorm(layer_output + residual_tensor_1)
421
+ else:
422
+ layer_output = self.LayerNorm(layer_output + residual_tensor_1)
423
+ layer_output = self.bottleneck(layer_output, residual_tensor_2)
424
+ return layer_output
425
+
426
+
427
+ class BottleneckLayer(nn.Module):
428
+ def __init__(self, config):
429
+ super().__init__()
430
+ self.dense = nn.Linear(config.hidden_size, config.intra_bottleneck_size)
431
+ self.LayerNorm = NORM2FN[config.normalization_type](config.intra_bottleneck_size, eps=config.layer_norm_eps)
432
+
433
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
434
+ layer_input = self.dense(hidden_states)
435
+ layer_input = self.LayerNorm(layer_input)
436
+ return layer_input
437
+
438
+
439
+ class Bottleneck(nn.Module):
440
+ def __init__(self, config):
441
+ super().__init__()
442
+ self.key_query_shared_bottleneck = config.key_query_shared_bottleneck
443
+ self.use_bottleneck_attention = config.use_bottleneck_attention
444
+ self.input = BottleneckLayer(config)
445
+ if self.key_query_shared_bottleneck:
446
+ self.attention = BottleneckLayer(config)
447
+
448
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
449
+ # This method can return three different tuples of values. These different values make use of bottlenecks,
450
+ # which are linear layers used to project the hidden states to a lower-dimensional vector, reducing memory
451
+ # usage. These linear layer have weights that are learned during training.
452
+ #
453
+ # If `config.use_bottleneck_attention`, it will return the result of the bottleneck layer four times for the
454
+ # key, query, value, and "layer input" to be used by the attention layer.
455
+ # This bottleneck is used to project the hidden. This last layer input will be used as a residual tensor
456
+ # in the attention self output, after the attention scores have been computed.
457
+ #
458
+ # If not `config.use_bottleneck_attention` and `config.key_query_shared_bottleneck`, this will return
459
+ # four values, three of which have been passed through a bottleneck: the query and key, passed through the same
460
+ # bottleneck, and the residual layer to be applied in the attention self output, through another bottleneck.
461
+ #
462
+ # Finally, in the last case, the values for the query, key and values are the hidden states without bottleneck,
463
+ # and the residual layer will be this value passed through a bottleneck.
464
+
465
+ bottlenecked_hidden_states = self.input(hidden_states)
466
+ if self.use_bottleneck_attention:
467
+ return (bottlenecked_hidden_states,) * 4
468
+ elif self.key_query_shared_bottleneck:
469
+ shared_attention_input = self.attention(hidden_states)
470
+ return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states)
471
+ else:
472
+ return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states)
473
+
474
+
475
+ class FFNOutput(nn.Module):
476
+ def __init__(self, config):
477
+ super().__init__()
478
+ self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)
479
+ self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)
480
+
481
+ def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
482
+ layer_outputs = self.dense(hidden_states)
483
+ layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
484
+ return layer_outputs
485
+
486
+
487
+ class FFNLayer(nn.Module):
488
+ def __init__(self, config):
489
+ super().__init__()
490
+ self.intermediate = MobileBertIntermediate(config)
491
+ self.output = FFNOutput(config)
492
+
493
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
494
+ intermediate_output = self.intermediate(hidden_states)
495
+ layer_outputs = self.output(intermediate_output, hidden_states)
496
+ return layer_outputs
497
+
498
+
499
+ class MobileBertLayer(nn.Module):
500
+ def __init__(self, config):
501
+ super().__init__()
502
+ self.use_bottleneck = config.use_bottleneck
503
+ self.num_feedforward_networks = config.num_feedforward_networks
504
+
505
+ self.attention = MobileBertAttention(config)
506
+ self.intermediate = MobileBertIntermediate(config)
507
+ self.output = MobileBertOutput(config)
508
+ if self.use_bottleneck:
509
+ self.bottleneck = Bottleneck(config)
510
+ if config.num_feedforward_networks > 1:
511
+ self.ffn = nn.ModuleList([FFNLayer(config) for _ in range(config.num_feedforward_networks - 1)])
512
+
513
+ def forward(
514
+ self,
515
+ hidden_states: torch.Tensor,
516
+ attention_mask: Optional[torch.FloatTensor] = None,
517
+ head_mask: Optional[torch.FloatTensor] = None,
518
+ output_attentions: Optional[bool] = None,
519
+ ) -> Tuple[torch.Tensor]:
520
+ if self.use_bottleneck:
521
+ query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states)
522
+ else:
523
+ query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4
524
+
525
+ self_attention_outputs = self.attention(
526
+ query_tensor,
527
+ key_tensor,
528
+ value_tensor,
529
+ layer_input,
530
+ attention_mask,
531
+ head_mask,
532
+ output_attentions=output_attentions,
533
+ )
534
+ attention_output = self_attention_outputs[0]
535
+ s = (attention_output,)
536
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
537
+
538
+ if self.num_feedforward_networks != 1:
539
+ for i, ffn_module in enumerate(self.ffn):
540
+ attention_output = ffn_module(attention_output)
541
+ s += (attention_output,)
542
+
543
+ intermediate_output = self.intermediate(attention_output)
544
+ layer_output = self.output(intermediate_output, attention_output, hidden_states)
545
+ outputs = (
546
+ (layer_output,)
547
+ + outputs
548
+ + (
549
+ torch.tensor(1000),
550
+ query_tensor,
551
+ key_tensor,
552
+ value_tensor,
553
+ layer_input,
554
+ attention_output,
555
+ intermediate_output,
556
+ )
557
+ + s
558
+ )
559
+ return outputs
560
+
561
+
562
+ class MobileBertEncoder(nn.Module):
563
+ def __init__(self, config):
564
+ super().__init__()
565
+ self.layer = nn.ModuleList([MobileBertLayer(config) for _ in range(config.num_hidden_layers)])
566
+
567
+ def forward(
568
+ self,
569
+ hidden_states: torch.Tensor,
570
+ attention_mask: Optional[torch.FloatTensor] = None,
571
+ head_mask: Optional[torch.FloatTensor] = None,
572
+ output_attentions: Optional[bool] = False,
573
+ output_hidden_states: Optional[bool] = False,
574
+ return_dict: Optional[bool] = True,
575
+ ) -> Union[Tuple, BaseModelOutput]:
576
+ all_hidden_states = () if output_hidden_states else None
577
+ all_attentions = () if output_attentions else None
578
+ for i, layer_module in enumerate(self.layer):
579
+ if output_hidden_states:
580
+ all_hidden_states = all_hidden_states + (hidden_states,)
581
+
582
+ layer_outputs = layer_module(
583
+ hidden_states,
584
+ attention_mask,
585
+ head_mask[i],
586
+ output_attentions,
587
+ )
588
+ hidden_states = layer_outputs[0]
589
+
590
+ if output_attentions:
591
+ all_attentions = all_attentions + (layer_outputs[1],)
592
+
593
+ # Add last layer
594
+ if output_hidden_states:
595
+ all_hidden_states = all_hidden_states + (hidden_states,)
596
+
597
+ if not return_dict:
598
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
599
+ return BaseModelOutput(
600
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
601
+ )
602
+
603
+
604
+ class MobileBertPooler(nn.Module):
605
+ def __init__(self, config):
606
+ super().__init__()
607
+ self.do_activate = config.classifier_activation
608
+ if self.do_activate:
609
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
610
+
611
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
612
+ # We "pool" the model by simply taking the hidden state corresponding
613
+ # to the first token.
614
+ first_token_tensor = hidden_states[:, 0]
615
+ if not self.do_activate:
616
+ return first_token_tensor
617
+ else:
618
+ pooled_output = self.dense(first_token_tensor)
619
+ pooled_output = torch.tanh(pooled_output)
620
+ return pooled_output
621
+
622
+
623
+ class MobileBertPredictionHeadTransform(nn.Module):
624
+ def __init__(self, config):
625
+ super().__init__()
626
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
627
+ if isinstance(config.hidden_act, str):
628
+ self.transform_act_fn = ACT2FN[config.hidden_act]
629
+ else:
630
+ self.transform_act_fn = config.hidden_act
631
+ self.LayerNorm = NORM2FN["layer_norm"](config.hidden_size, eps=config.layer_norm_eps)
632
+
633
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
634
+ hidden_states = self.dense(hidden_states)
635
+ hidden_states = self.transform_act_fn(hidden_states)
636
+ hidden_states = self.LayerNorm(hidden_states)
637
+ return hidden_states
638
+
639
+
640
+ class MobileBertLMPredictionHead(nn.Module):
641
+ def __init__(self, config):
642
+ super().__init__()
643
+ self.transform = MobileBertPredictionHeadTransform(config)
644
+ # The output weights are the same as the input embeddings, but there is
645
+ # an output-only bias for each token.
646
+ self.dense = nn.Linear(config.vocab_size, config.hidden_size - config.embedding_size, bias=False)
647
+ self.decoder = nn.Linear(config.embedding_size, config.vocab_size, bias=False)
648
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
649
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
650
+ self.decoder.bias = self.bias
651
+
652
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
653
+ hidden_states = self.transform(hidden_states)
654
+ hidden_states = hidden_states.matmul(torch.cat([self.decoder.weight.t(), self.dense.weight], dim=0))
655
+ hidden_states += self.decoder.bias
656
+ return hidden_states
657
+
658
+
659
+ class MobileBertOnlyMLMHead(nn.Module):
660
+ def __init__(self, config):
661
+ super().__init__()
662
+ self.predictions = MobileBertLMPredictionHead(config)
663
+
664
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
665
+ prediction_scores = self.predictions(sequence_output)
666
+ return prediction_scores
667
+
668
+
669
+ class MobileBertPreTrainingHeads(nn.Module):
670
+ def __init__(self, config):
671
+ super().__init__()
672
+ self.predictions = MobileBertLMPredictionHead(config)
673
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
674
+
675
+ def forward(self, sequence_output: torch.Tensor, pooled_output: torch.Tensor) -> Tuple[torch.Tensor]:
676
+ prediction_scores = self.predictions(sequence_output)
677
+ seq_relationship_score = self.seq_relationship(pooled_output)
678
+ return prediction_scores, seq_relationship_score
679
+
680
+
681
+ class MobileBertPreTrainedModel(PreTrainedModel):
682
+ """
683
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
684
+ models.
685
+ """
686
+
687
+ config_class = MobileBertConfig
688
+ pretrained_model_archive_map = MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST
689
+ load_tf_weights = load_tf_weights_in_mobilebert
690
+ base_model_prefix = "mobilebert"
691
+
692
+ def _init_weights(self, module):
693
+ """Initialize the weights"""
694
+ if isinstance(module, nn.Linear):
695
+ # Slightly different from the TF version which uses truncated_normal for initialization
696
+ # cf https://github.com/pytorch/pytorch/pull/5617
697
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
698
+ if module.bias is not None:
699
+ module.bias.data.zero_()
700
+ elif isinstance(module, nn.Embedding):
701
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
702
+ if module.padding_idx is not None:
703
+ module.weight.data[module.padding_idx].zero_()
704
+ elif isinstance(module, (nn.LayerNorm, NoNorm)):
705
+ module.bias.data.zero_()
706
+ module.weight.data.fill_(1.0)
707
+
708
+
709
+ @dataclass
710
+ class MobileBertForPreTrainingOutput(ModelOutput):
711
+ """
712
+ Output type of [`MobileBertForPreTraining`].
713
+
714
+ Args:
715
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
716
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
717
+ (classification) loss.
718
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
719
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
720
+ seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
721
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
722
+ before SoftMax).
723
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
724
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
725
+ shape `(batch_size, sequence_length, hidden_size)`.
726
+
727
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
728
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
729
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
730
+ sequence_length)`.
731
+
732
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
733
+ heads.
734
+ """
735
+
736
+ loss: Optional[torch.FloatTensor] = None
737
+ prediction_logits: torch.FloatTensor = None
738
+ seq_relationship_logits: torch.FloatTensor = None
739
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
740
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
741
+
742
+
743
+ MOBILEBERT_START_DOCSTRING = r"""
744
+
745
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
746
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
747
+ etc.)
748
+
749
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
750
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
751
+ and behavior.
752
+
753
+ Parameters:
754
+ config ([`MobileBertConfig`]): Model configuration class with all the parameters of the model.
755
+ Initializing with a config file does not load the weights associated with the model, only the
756
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
757
+ """
758
+
759
+ MOBILEBERT_INPUTS_DOCSTRING = r"""
760
+ Args:
761
+ input_ids (`torch.LongTensor` of shape `({0})`):
762
+ Indices of input sequence tokens in the vocabulary.
763
+
764
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
765
+ [`PreTrainedTokenizer.__call__`] for details.
766
+
767
+ [What are input IDs?](../glossary#input-ids)
768
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
769
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
770
+
771
+ - 1 for tokens that are **not masked**,
772
+ - 0 for tokens that are **masked**.
773
+
774
+ [What are attention masks?](../glossary#attention-mask)
775
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
776
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
777
+ 1]`:
778
+
779
+ - 0 corresponds to a *sentence A* token,
780
+ - 1 corresponds to a *sentence B* token.
781
+
782
+ [What are token type IDs?](../glossary#token-type-ids)
783
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
784
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
785
+ config.max_position_embeddings - 1]`.
786
+
787
+ [What are position IDs?](../glossary#position-ids)
788
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
789
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
790
+
791
+ - 1 indicates the head is **not masked**,
792
+ - 0 indicates the head is **masked**.
793
+
794
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
795
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
796
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
797
+ model's internal embedding lookup matrix.
798
+ output_attentions (`bool`, *optional*):
799
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
800
+ tensors for more detail.
801
+ output_hidden_states (`bool`, *optional*):
802
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
803
+ more detail.
804
+ return_dict (`bool`, *optional*):
805
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
806
+ """
807
+
808
+
809
+ @add_start_docstrings(
810
+ "The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.",
811
+ MOBILEBERT_START_DOCSTRING,
812
+ )
813
+ class MobileBertModel(MobileBertPreTrainedModel):
814
+ """
815
+ MobileBERT model, described in "MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices" (https://arxiv.org/pdf/2004.02984.pdf).
816
+ """
817
+
818
+ def __init__(self, config, add_pooling_layer=True):
819
+ super().__init__(config)
820
+ self.config = config
821
+ self.embeddings = MobileBertEmbeddings(config)
822
+ self.encoder = MobileBertEncoder(config)
823
+
824
+ self.pooler = MobileBertPooler(config) if add_pooling_layer else None
825
+
826
+ # Initialize weights and apply final processing
827
+ self.post_init()
828
+
829
+ def get_input_embeddings(self):
830
+ return self.embeddings.word_embeddings
831
+
832
+ def set_input_embeddings(self, value):
833
+ self.embeddings.word_embeddings = value
834
+
835
+ def _prune_heads(self, heads_to_prune):
836
+ """
837
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
838
+ class PreTrainedModel
839
+ """
840
+ for layer, heads in heads_to_prune.items():
841
+ self.encoder.layer[layer].attention.prune_heads(heads)
842
+
843
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
844
+ @add_code_sample_docstrings(
845
+ checkpoint=_CHECKPOINT_FOR_DOC,
846
+ output_type=BaseModelOutputWithPooling,
847
+ config_class=_CONFIG_FOR_DOC,
848
+ )
849
+ def forward(
850
+ self,
851
+ input_ids: Optional[torch.LongTensor] = None,
852
+ attention_mask: Optional[torch.FloatTensor] = None,
853
+ token_type_ids: Optional[torch.LongTensor] = None,
854
+ position_ids: Optional[torch.LongTensor] = None,
855
+ head_mask: Optional[torch.FloatTensor] = None,
856
+ inputs_embeds: Optional[torch.FloatTensor] = None,
857
+ output_hidden_states: Optional[bool] = None,
858
+ output_attentions: Optional[bool] = None,
859
+ return_dict: Optional[bool] = None,
860
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
861
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
862
+ output_hidden_states = (
863
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
864
+ )
865
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
866
+
867
+ if input_ids is not None and inputs_embeds is not None:
868
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
869
+ elif input_ids is not None:
870
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
871
+ input_shape = input_ids.size()
872
+ elif inputs_embeds is not None:
873
+ input_shape = inputs_embeds.size()[:-1]
874
+ else:
875
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
876
+
877
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
878
+
879
+ if attention_mask is None:
880
+ attention_mask = torch.ones(input_shape, device=device)
881
+ if token_type_ids is None:
882
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
883
+
884
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
885
+ # ourselves in which case we just need to make it broadcastable to all heads.
886
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
887
+
888
+ # Prepare head mask if needed
889
+ # 1.0 in head_mask indicate we keep the head
890
+ # attention_probs has shape bsz x n_heads x N x N
891
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
892
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
893
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
894
+
895
+ embedding_output = self.embeddings(
896
+ input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
897
+ )
898
+ encoder_outputs = self.encoder(
899
+ embedding_output,
900
+ attention_mask=extended_attention_mask,
901
+ head_mask=head_mask,
902
+ output_attentions=output_attentions,
903
+ output_hidden_states=output_hidden_states,
904
+ return_dict=return_dict,
905
+ )
906
+ sequence_output = encoder_outputs[0]
907
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
908
+
909
+ if not return_dict:
910
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
911
+
912
+ return BaseModelOutputWithPooling(
913
+ last_hidden_state=sequence_output,
914
+ pooler_output=pooled_output,
915
+ hidden_states=encoder_outputs.hidden_states,
916
+ attentions=encoder_outputs.attentions,
917
+ )
918
+
919
+
920
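+ Before the encoder runs, the 1/0 `attention_mask` is converted into an additive mask that is broadcast over all heads; the library does this in `PreTrainedModel.get_extended_attention_mask`. A minimal sketch of that conversion, assuming the usual `[batch, 1, 1, seq]` layout:

```python
import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])                  # (batch, seq): 1 = keep, 0 = padding
extended = attention_mask[:, None, None, :].to(torch.float32)  # (batch, 1, 1, seq), broadcastable over heads
extended = (1.0 - extended) * torch.finfo(torch.float32).min   # 0 for kept tokens, very negative for padding
# `extended` is added to the raw attention scores before the softmax,
# pushing padded positions to ~zero probability.
print(extended)
```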
+ @add_start_docstrings(
921
+ """
922
+ MobileBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
923
+ `next sentence prediction (classification)` head.
924
+ """,
925
+ MOBILEBERT_START_DOCSTRING,
926
+ )
927
+ class MobileBertForPreTraining(MobileBertPreTrainedModel):
928
+ _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
929
+
930
+ def __init__(self, config):
931
+ super().__init__(config)
932
+ self.mobilebert = MobileBertModel(config)
933
+ self.cls = MobileBertPreTrainingHeads(config)
934
+
935
+ # Initialize weights and apply final processing
936
+ self.post_init()
937
+
938
+ def get_output_embeddings(self):
939
+ return self.cls.predictions.decoder
940
+
941
+ def set_output_embeddings(self, new_embeddings):
942
+ self.cls.predictions.decoder = new_embeddings
943
+
944
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
945
+ # resize the dense output embeddings first
946
+ self.cls.predictions.dense = self._get_resized_lm_head(
947
+ self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True
948
+ )
949
+
950
+ return super().resize_token_embeddings(new_num_tokens=new_num_tokens)
951
+
952
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
953
+ @replace_return_docstrings(output_type=MobileBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
954
+ def forward(
955
+ self,
956
+ input_ids: Optional[torch.LongTensor] = None,
957
+ attention_mask: Optional[torch.FloatTensor] = None,
958
+ token_type_ids: Optional[torch.LongTensor] = None,
959
+ position_ids: Optional[torch.LongTensor] = None,
960
+ head_mask: Optional[torch.FloatTensor] = None,
961
+ inputs_embeds: Optional[torch.FloatTensor] = None,
962
+ labels: Optional[torch.LongTensor] = None,
963
+ next_sentence_label: Optional[torch.LongTensor] = None,
964
+ output_attentions: Optional[torch.FloatTensor] = None,
965
+ output_hidden_states: Optional[torch.FloatTensor] = None,
966
+ return_dict: Optional[torch.FloatTensor] = None,
967
+ ) -> Union[Tuple, MobileBertForPreTrainingOutput]:
968
+ r"""
969
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
970
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
971
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
972
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
973
+ next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
974
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
975
+ (see `input_ids` docstring) Indices should be in `[0, 1]`:
976
+
977
+ - 0 indicates sequence B is a continuation of sequence A,
978
+ - 1 indicates sequence B is a random sequence.
979
+
980
+ Returns:
981
+
982
+ Examples:
983
+
984
+ ```python
985
+ >>> from transformers import AutoTokenizer, MobileBertForPreTraining
986
+ >>> import torch
987
+
988
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
989
+ >>> model = MobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
990
+
991
+ >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
992
+ >>> # Batch size 1
993
+ >>> outputs = model(input_ids)
994
+
995
+ >>> prediction_logits = outputs.prediction_logits
996
+ >>> seq_relationship_logits = outputs.seq_relationship_logits
997
+ ```"""
998
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
999
+
1000
+ outputs = self.mobilebert(
1001
+ input_ids,
1002
+ attention_mask=attention_mask,
1003
+ token_type_ids=token_type_ids,
1004
+ position_ids=position_ids,
1005
+ head_mask=head_mask,
1006
+ inputs_embeds=inputs_embeds,
1007
+ output_attentions=output_attentions,
1008
+ output_hidden_states=output_hidden_states,
1009
+ return_dict=return_dict,
1010
+ )
1011
+ sequence_output, pooled_output = outputs[:2]
1012
+ prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
1013
+
1014
+ total_loss = None
1015
+ if labels is not None and next_sentence_label is not None:
1016
+ loss_fct = CrossEntropyLoss()
1017
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1018
+ next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
1019
+ total_loss = masked_lm_loss + next_sentence_loss
1020
+
1021
+ if not return_dict:
1022
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
1023
+ return ((total_loss,) + output) if total_loss is not None else output
1024
+
1025
+ return MobileBertForPreTrainingOutput(
1026
+ loss=total_loss,
1027
+ prediction_logits=prediction_scores,
1028
+ seq_relationship_logits=seq_relationship_score,
1029
+ hidden_states=outputs.hidden_states,
1030
+ attentions=outputs.attentions,
1031
+ )
1032
+
1033
+
1034
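+ When both `labels` and `next_sentence_label` are provided, the pretraining loss above is simply the sum of the masked-LM and next-sentence cross-entropies. A hedged usage sketch (the input sentences, and reusing the unmasked `input_ids` as labels, are purely illustrative):

```python
import torch
from transformers import AutoTokenizer, MobileBertForPreTraining

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = MobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")

enc = tokenizer("The cat sat on the mat.", "It was very comfortable.", return_tensors="pt")
# -100 would mark positions excluded from the masked-LM loss; here all tokens count.
labels = enc["input_ids"].clone()
outputs = model(**enc, labels=labels, next_sentence_label=torch.tensor([0]))
print(outputs.loss)  # masked-LM loss + next-sentence loss
```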
+ @add_start_docstrings("""MobileBert Model with a `language modeling` head on top.""", MOBILEBERT_START_DOCSTRING)
1035
+ class MobileBertForMaskedLM(MobileBertPreTrainedModel):
1036
+ _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
1037
+
1038
+ def __init__(self, config):
1039
+ super().__init__(config)
1040
+ self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
1041
+ self.cls = MobileBertOnlyMLMHead(config)
1042
+ self.config = config
1043
+
1044
+ # Initialize weights and apply final processing
1045
+ self.post_init()
1046
+
1047
+ def get_output_embeddings(self):
1048
+ return self.cls.predictions.decoder
1049
+
1050
+ def set_output_embeddings(self, new_embeddings):
1051
+ self.cls.predictions.decoder = new_embeddings
1052
+
1053
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
1054
+ # resize the dense output embeddings first
1055
+ self.cls.predictions.dense = self._get_resized_lm_head(
1056
+ self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True
1057
+ )
1058
+ return super().resize_token_embeddings(new_num_tokens=new_num_tokens)
1059
+
1060
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1061
+ @add_code_sample_docstrings(
1062
+ checkpoint=_CHECKPOINT_FOR_DOC,
1063
+ output_type=MaskedLMOutput,
1064
+ config_class=_CONFIG_FOR_DOC,
1065
+ expected_output="'paris'",
1066
+ expected_loss=0.57,
1067
+ )
1068
+ def forward(
1069
+ self,
1070
+ input_ids: Optional[torch.LongTensor] = None,
1071
+ attention_mask: Optional[torch.FloatTensor] = None,
1072
+ token_type_ids: Optional[torch.LongTensor] = None,
1073
+ position_ids: Optional[torch.LongTensor] = None,
1074
+ head_mask: Optional[torch.FloatTensor] = None,
1075
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1076
+ labels: Optional[torch.LongTensor] = None,
1077
+ output_attentions: Optional[bool] = None,
1078
+ output_hidden_states: Optional[bool] = None,
1079
+ return_dict: Optional[bool] = None,
1080
+ ) -> Union[Tuple, MaskedLMOutput]:
1081
+ r"""
1082
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1083
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1084
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1085
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1086
+ """
1087
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1088
+
1089
+ outputs = self.mobilebert(
1090
+ input_ids,
1091
+ attention_mask=attention_mask,
1092
+ token_type_ids=token_type_ids,
1093
+ position_ids=position_ids,
1094
+ head_mask=head_mask,
1095
+ inputs_embeds=inputs_embeds,
1096
+ output_attentions=output_attentions,
1097
+ output_hidden_states=output_hidden_states,
1098
+ return_dict=return_dict,
1099
+ )
1100
+
1101
+ sequence_output = outputs[0]
1102
+ prediction_scores = self.cls(sequence_output)
1103
+
1104
+ masked_lm_loss = None
1105
+ if labels is not None:
1106
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
1107
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1108
+
1109
+ if not return_dict:
1110
+ output = (prediction_scores,) + outputs[2:]
1111
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1112
+
1113
+ return MaskedLMOutput(
1114
+ loss=masked_lm_loss,
1115
+ logits=prediction_scores,
1116
+ hidden_states=outputs.hidden_states,
1117
+ attentions=outputs.attentions,
1118
+ )
1119
+
1120
+
1121
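+ A short usage sketch for the masked-LM head above, predicting the `[MASK]` token; the example sentence is illustrative and mirrors the `'paris'` expected output referenced in the docstring sample:

```python
import torch
from transformers import AutoTokenizer, MobileBertForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = MobileBertForMaskedLM.from_pretrained("google/mobilebert-uncased")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

mask_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
predicted_id = logits[0, mask_index].argmax(dim=-1)
print(tokenizer.decode(predicted_id))  # expected to be something like "paris"
```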
+ class MobileBertOnlyNSPHead(nn.Module):
1122
+ def __init__(self, config):
1123
+ super().__init__()
1124
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
1125
+
1126
+ def forward(self, pooled_output: torch.Tensor) -> torch.Tensor:
1127
+ seq_relationship_score = self.seq_relationship(pooled_output)
1128
+ return seq_relationship_score
1129
+
1130
+
1131
+ @add_start_docstrings(
1132
+ """MobileBert Model with a `next sentence prediction (classification)` head on top.""",
1133
+ MOBILEBERT_START_DOCSTRING,
1134
+ )
1135
+ class MobileBertForNextSentencePrediction(MobileBertPreTrainedModel):
1136
+ def __init__(self, config):
1137
+ super().__init__(config)
1138
+
1139
+ self.mobilebert = MobileBertModel(config)
1140
+ self.cls = MobileBertOnlyNSPHead(config)
1141
+
1142
+ # Initialize weights and apply final processing
1143
+ self.post_init()
1144
+
1145
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1146
+ @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
1147
+ def forward(
1148
+ self,
1149
+ input_ids: Optional[torch.LongTensor] = None,
1150
+ attention_mask: Optional[torch.FloatTensor] = None,
1151
+ token_type_ids: Optional[torch.LongTensor] = None,
1152
+ position_ids: Optional[torch.LongTensor] = None,
1153
+ head_mask: Optional[torch.FloatTensor] = None,
1154
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1155
+ labels: Optional[torch.LongTensor] = None,
1156
+ output_attentions: Optional[bool] = None,
1157
+ output_hidden_states: Optional[bool] = None,
1158
+ return_dict: Optional[bool] = None,
1159
+ **kwargs,
1160
+ ) -> Union[Tuple, NextSentencePredictorOutput]:
1161
+ r"""
1162
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1163
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
1164
+ (see `input_ids` docstring) Indices should be in `[0, 1]`.
1165
+
1166
+ - 0 indicates sequence B is a continuation of sequence A,
1167
+ - 1 indicates sequence B is a random sequence.
1168
+
1169
+ Returns:
1170
+
1171
+ Examples:
1172
+
1173
+ ```python
1174
+ >>> from transformers import AutoTokenizer, MobileBertForNextSentencePrediction
1175
+ >>> import torch
1176
+
1177
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
1178
+ >>> model = MobileBertForNextSentencePrediction.from_pretrained("google/mobilebert-uncased")
1179
+
1180
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1181
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
1182
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
1183
+
1184
+ >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
1185
+ >>> loss = outputs.loss
1186
+ >>> logits = outputs.logits
1187
+ ```"""
1188
+
1189
+ if "next_sentence_label" in kwargs:
1190
+ warnings.warn(
1191
+ "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
1192
+ " `labels` instead.",
1193
+ FutureWarning,
1194
+ )
1195
+ labels = kwargs.pop("next_sentence_label")
1196
+
1197
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1198
+
1199
+ outputs = self.mobilebert(
1200
+ input_ids,
1201
+ attention_mask=attention_mask,
1202
+ token_type_ids=token_type_ids,
1203
+ position_ids=position_ids,
1204
+ head_mask=head_mask,
1205
+ inputs_embeds=inputs_embeds,
1206
+ output_attentions=output_attentions,
1207
+ output_hidden_states=output_hidden_states,
1208
+ return_dict=return_dict,
1209
+ )
1210
+
1211
+ pooled_output = outputs[1]
1212
+ seq_relationship_score = self.cls(pooled_output)
1213
+
1214
+ next_sentence_loss = None
1215
+ if labels is not None:
1216
+ loss_fct = CrossEntropyLoss()
1217
+ next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), labels.view(-1))
1218
+
1219
+ if not return_dict:
1220
+ output = (seq_relationship_score,) + outputs[2:]
1221
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
1222
+
1223
+ return NextSentencePredictorOutput(
1224
+ loss=next_sentence_loss,
1225
+ logits=seq_relationship_score,
1226
+ hidden_states=outputs.hidden_states,
1227
+ attentions=outputs.attentions,
1228
+ )
1229
+
1230
+
1231
+ @add_start_docstrings(
1232
+ """
1233
+ MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1234
+ pooled output) e.g. for GLUE tasks.
1235
+ """,
1236
+ MOBILEBERT_START_DOCSTRING,
1237
+ )
1238
+ # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification with Bert->MobileBert all-casing
1239
+ class MobileBertForSequenceClassification(MobileBertPreTrainedModel):
1240
+ def __init__(self, config):
1241
+ super().__init__(config)
1242
+ self.num_labels = config.num_labels
1243
+ self.config = config
1244
+
1245
+ self.mobilebert = MobileBertModel(config)
1246
+ classifier_dropout = (
1247
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1248
+ )
1249
+ self.dropout = nn.Dropout(classifier_dropout)
1250
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1251
+
1252
+ # Initialize weights and apply final processing
1253
+ self.post_init()
1254
+
1255
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1256
+ @add_code_sample_docstrings(
1257
+ checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION,
1258
+ output_type=SequenceClassifierOutput,
1259
+ config_class=_CONFIG_FOR_DOC,
1260
+ expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
1261
+ expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
1262
+ )
1263
+ def forward(
1264
+ self,
1265
+ input_ids: Optional[torch.Tensor] = None,
1266
+ attention_mask: Optional[torch.Tensor] = None,
1267
+ token_type_ids: Optional[torch.Tensor] = None,
1268
+ position_ids: Optional[torch.Tensor] = None,
1269
+ head_mask: Optional[torch.Tensor] = None,
1270
+ inputs_embeds: Optional[torch.Tensor] = None,
1271
+ labels: Optional[torch.Tensor] = None,
1272
+ output_attentions: Optional[bool] = None,
1273
+ output_hidden_states: Optional[bool] = None,
1274
+ return_dict: Optional[bool] = None,
1275
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
1276
+ r"""
1277
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1278
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1279
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1280
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1281
+ """
1282
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1283
+
1284
+ outputs = self.mobilebert(
1285
+ input_ids,
1286
+ attention_mask=attention_mask,
1287
+ token_type_ids=token_type_ids,
1288
+ position_ids=position_ids,
1289
+ head_mask=head_mask,
1290
+ inputs_embeds=inputs_embeds,
1291
+ output_attentions=output_attentions,
1292
+ output_hidden_states=output_hidden_states,
1293
+ return_dict=return_dict,
1294
+ )
1295
+
1296
+ pooled_output = outputs[1]
1297
+
1298
+ pooled_output = self.dropout(pooled_output)
1299
+ logits = self.classifier(pooled_output)
1300
+
1301
+ loss = None
1302
+ if labels is not None:
1303
+ if self.config.problem_type is None:
1304
+ if self.num_labels == 1:
1305
+ self.config.problem_type = "regression"
1306
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1307
+ self.config.problem_type = "single_label_classification"
1308
+ else:
1309
+ self.config.problem_type = "multi_label_classification"
1310
+
1311
+ if self.config.problem_type == "regression":
1312
+ loss_fct = MSELoss()
1313
+ if self.num_labels == 1:
1314
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1315
+ else:
1316
+ loss = loss_fct(logits, labels)
1317
+ elif self.config.problem_type == "single_label_classification":
1318
+ loss_fct = CrossEntropyLoss()
1319
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1320
+ elif self.config.problem_type == "multi_label_classification":
1321
+ loss_fct = BCEWithLogitsLoss()
1322
+ loss = loss_fct(logits, labels)
1323
+ if not return_dict:
1324
+ output = (logits,) + outputs[2:]
1325
+ return ((loss,) + output) if loss is not None else output
1326
+
1327
+ return SequenceClassifierOutput(
1328
+ loss=loss,
1329
+ logits=logits,
1330
+ hidden_states=outputs.hidden_states,
1331
+ attentions=outputs.attentions,
1332
+ )
1333
+
1334
+
1335
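+ The loss branch above infers `config.problem_type` lazily from `num_labels` and the label dtype. A minimal standalone sketch of that dispatch (the helper name and the toy tensors are hypothetical):

```python
import torch

def pick_problem_type(num_labels: int, labels: torch.Tensor) -> str:
    # Mirrors the dispatch used in the forward pass above (sketch, not library code).
    if num_labels == 1:
        return "regression"                      # MSELoss
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"     # CrossEntropyLoss
    return "multi_label_classification"          # BCEWithLogitsLoss

print(pick_problem_type(1, torch.tensor([0.3])))              # regression
print(pick_problem_type(3, torch.tensor([2])))                # single_label_classification
print(pick_problem_type(3, torch.tensor([[1.0, 0.0, 1.0]])))  # multi_label_classification
```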
+ @add_start_docstrings(
1336
+ """
1337
+ MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
1338
+ linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1339
+ """,
1340
+ MOBILEBERT_START_DOCSTRING,
1341
+ )
1342
+ # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering with Bert->MobileBert all-casing
1343
+ class MobileBertForQuestionAnswering(MobileBertPreTrainedModel):
1344
+ def __init__(self, config):
1345
+ super().__init__(config)
1346
+ self.num_labels = config.num_labels
1347
+
1348
+ self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
1349
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1350
+
1351
+ # Initialize weights and apply final processing
1352
+ self.post_init()
1353
+
1354
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1355
+ @add_code_sample_docstrings(
1356
+ checkpoint=_CHECKPOINT_FOR_QA,
1357
+ output_type=QuestionAnsweringModelOutput,
1358
+ config_class=_CONFIG_FOR_DOC,
1359
+ qa_target_start_index=_QA_TARGET_START_INDEX,
1360
+ qa_target_end_index=_QA_TARGET_END_INDEX,
1361
+ expected_output=_QA_EXPECTED_OUTPUT,
1362
+ expected_loss=_QA_EXPECTED_LOSS,
1363
+ )
1364
+ def forward(
1365
+ self,
1366
+ input_ids: Optional[torch.Tensor] = None,
1367
+ attention_mask: Optional[torch.Tensor] = None,
1368
+ token_type_ids: Optional[torch.Tensor] = None,
1369
+ position_ids: Optional[torch.Tensor] = None,
1370
+ head_mask: Optional[torch.Tensor] = None,
1371
+ inputs_embeds: Optional[torch.Tensor] = None,
1372
+ start_positions: Optional[torch.Tensor] = None,
1373
+ end_positions: Optional[torch.Tensor] = None,
1374
+ output_attentions: Optional[bool] = None,
1375
+ output_hidden_states: Optional[bool] = None,
1376
+ return_dict: Optional[bool] = None,
1377
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
1378
+ r"""
1379
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1380
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1381
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1382
+ are not taken into account for computing the loss.
1383
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1384
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1385
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1386
+ are not taken into account for computing the loss.
1387
+ """
1388
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1389
+
1390
+ outputs = self.mobilebert(
1391
+ input_ids,
1392
+ attention_mask=attention_mask,
1393
+ token_type_ids=token_type_ids,
1394
+ position_ids=position_ids,
1395
+ head_mask=head_mask,
1396
+ inputs_embeds=inputs_embeds,
1397
+ output_attentions=output_attentions,
1398
+ output_hidden_states=output_hidden_states,
1399
+ return_dict=return_dict,
1400
+ )
1401
+
1402
+ sequence_output = outputs[0]
1403
+
1404
+ logits = self.qa_outputs(sequence_output)
1405
+ start_logits, end_logits = logits.split(1, dim=-1)
1406
+ start_logits = start_logits.squeeze(-1).contiguous()
1407
+ end_logits = end_logits.squeeze(-1).contiguous()
1408
+
1409
+ total_loss = None
1410
+ if start_positions is not None and end_positions is not None:
1411
+ # If we are on multi-GPU, the split adds an extra dimension; squeeze it
1412
+ if len(start_positions.size()) > 1:
1413
+ start_positions = start_positions.squeeze(-1)
1414
+ if len(end_positions.size()) > 1:
1415
+ end_positions = end_positions.squeeze(-1)
1416
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1417
+ ignored_index = start_logits.size(1)
1418
+ start_positions = start_positions.clamp(0, ignored_index)
1419
+ end_positions = end_positions.clamp(0, ignored_index)
1420
+
1421
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1422
+ start_loss = loss_fct(start_logits, start_positions)
1423
+ end_loss = loss_fct(end_logits, end_positions)
1424
+ total_loss = (start_loss + end_loss) / 2
1425
+
1426
+ if not return_dict:
1427
+ output = (start_logits, end_logits) + outputs[2:]
1428
+ return ((total_loss,) + output) if total_loss is not None else output
1429
+
1430
+ return QuestionAnsweringModelOutput(
1431
+ loss=total_loss,
1432
+ start_logits=start_logits,
1433
+ end_logits=end_logits,
1434
+ hidden_states=outputs.hidden_states,
1435
+ attentions=outputs.attentions,
1436
+ )
1437
+
1438
+
1439
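+ A usage sketch for the span-extraction head above: the answer is recovered by taking the argmax of the start and end logits and decoding the tokens in between. The checkpoint name comes from the QA docstring constants used elsewhere in this commit; the question and context are illustrative:

```python
import torch
from transformers import AutoTokenizer, MobileBertForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("vumichien/mobilebert-uncased-squad-v2")
model = MobileBertForQuestionAnswering.from_pretrained("vumichien/mobilebert-uncased-squad-v2")

question, context = "Who wrote Hamlet?", "Hamlet is a tragedy written by William Shakespeare."
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

start = outputs.start_logits.argmax()
end = outputs.end_logits.argmax()
print(tokenizer.decode(inputs.input_ids[0, start : end + 1]))
```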
+ @add_start_docstrings(
1440
+ """
1441
+ MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
1442
+ a softmax) e.g. for RocStories/SWAG tasks.
1443
+ """,
1444
+ MOBILEBERT_START_DOCSTRING,
1445
+ )
1446
+ # Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice with Bert->MobileBert all-casing
1447
+ class MobileBertForMultipleChoice(MobileBertPreTrainedModel):
1448
+ def __init__(self, config):
1449
+ super().__init__(config)
1450
+
1451
+ self.mobilebert = MobileBertModel(config)
1452
+ classifier_dropout = (
1453
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1454
+ )
1455
+ self.dropout = nn.Dropout(classifier_dropout)
1456
+ self.classifier = nn.Linear(config.hidden_size, 1)
1457
+
1458
+ # Initialize weights and apply final processing
1459
+ self.post_init()
1460
+
1461
+ @add_start_docstrings_to_model_forward(
1462
+ MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1463
+ )
1464
+ @add_code_sample_docstrings(
1465
+ checkpoint=_CHECKPOINT_FOR_DOC,
1466
+ output_type=MultipleChoiceModelOutput,
1467
+ config_class=_CONFIG_FOR_DOC,
1468
+ )
1469
+ def forward(
1470
+ self,
1471
+ input_ids: Optional[torch.Tensor] = None,
1472
+ attention_mask: Optional[torch.Tensor] = None,
1473
+ token_type_ids: Optional[torch.Tensor] = None,
1474
+ position_ids: Optional[torch.Tensor] = None,
1475
+ head_mask: Optional[torch.Tensor] = None,
1476
+ inputs_embeds: Optional[torch.Tensor] = None,
1477
+ labels: Optional[torch.Tensor] = None,
1478
+ output_attentions: Optional[bool] = None,
1479
+ output_hidden_states: Optional[bool] = None,
1480
+ return_dict: Optional[bool] = None,
1481
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
1482
+ r"""
1483
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1484
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1485
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1486
+ `input_ids` above)
1487
+ """
1488
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1489
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1490
+
1491
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1492
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1493
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1494
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1495
+ inputs_embeds = (
1496
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1497
+ if inputs_embeds is not None
1498
+ else None
1499
+ )
1500
+
1501
+ outputs = self.mobilebert(
1502
+ input_ids,
1503
+ attention_mask=attention_mask,
1504
+ token_type_ids=token_type_ids,
1505
+ position_ids=position_ids,
1506
+ head_mask=head_mask,
1507
+ inputs_embeds=inputs_embeds,
1508
+ output_attentions=output_attentions,
1509
+ output_hidden_states=output_hidden_states,
1510
+ return_dict=return_dict,
1511
+ )
1512
+
1513
+ pooled_output = outputs[1]
1514
+
1515
+ pooled_output = self.dropout(pooled_output)
1516
+ logits = self.classifier(pooled_output)
1517
+ reshaped_logits = logits.view(-1, num_choices)
1518
+
1519
+ loss = None
1520
+ if labels is not None:
1521
+ loss_fct = CrossEntropyLoss()
1522
+ loss = loss_fct(reshaped_logits, labels)
1523
+
1524
+ if not return_dict:
1525
+ output = (reshaped_logits,) + outputs[2:]
1526
+ return ((loss,) + output) if loss is not None else output
1527
+
1528
+ return MultipleChoiceModelOutput(
1529
+ loss=loss,
1530
+ logits=reshaped_logits,
1531
+ hidden_states=outputs.hidden_states,
1532
+ attentions=outputs.attentions,
1533
+ )
1534
+
1535
+
1536
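+ The multiple-choice head above flattens the `(batch, num_choices, seq_len)` inputs before the encoder and reshapes the per-choice scores back afterwards. A toy shape walk-through (all sizes hypothetical):

```python
import torch

batch_size, num_choices, seq_len = 2, 4, 8
input_ids = torch.randint(0, 100, (batch_size, num_choices, seq_len))

flat = input_ids.view(-1, seq_len)       # (batch * num_choices, seq_len) fed through the encoder
logits = torch.randn(flat.size(0), 1)    # one score per (example, choice) pair from the classifier
reshaped = logits.view(-1, num_choices)  # (batch, num_choices), ready for cross-entropy over choices
print(reshaped.shape)                    # torch.Size([2, 4])
```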
+ @add_start_docstrings(
1537
+ """
1538
+ MobileBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
1539
+ for Named-Entity-Recognition (NER) tasks.
1540
+ """,
1541
+ MOBILEBERT_START_DOCSTRING,
1542
+ )
1543
+ # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification with Bert->MobileBert all-casing
1544
+ class MobileBertForTokenClassification(MobileBertPreTrainedModel):
1545
+ def __init__(self, config):
1546
+ super().__init__(config)
1547
+ self.num_labels = config.num_labels
1548
+
1549
+ self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
1550
+ classifier_dropout = (
1551
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1552
+ )
1553
+ self.dropout = nn.Dropout(classifier_dropout)
1554
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1555
+
1556
+ # Initialize weights and apply final processing
1557
+ self.post_init()
1558
+
1559
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1560
+ @add_code_sample_docstrings(
1561
+ checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION,
1562
+ output_type=TokenClassifierOutput,
1563
+ config_class=_CONFIG_FOR_DOC,
1564
+ expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT,
1565
+ expected_loss=_TOKEN_CLASS_EXPECTED_LOSS,
1566
+ )
1567
+ def forward(
1568
+ self,
1569
+ input_ids: Optional[torch.Tensor] = None,
1570
+ attention_mask: Optional[torch.Tensor] = None,
1571
+ token_type_ids: Optional[torch.Tensor] = None,
1572
+ position_ids: Optional[torch.Tensor] = None,
1573
+ head_mask: Optional[torch.Tensor] = None,
1574
+ inputs_embeds: Optional[torch.Tensor] = None,
1575
+ labels: Optional[torch.Tensor] = None,
1576
+ output_attentions: Optional[bool] = None,
1577
+ output_hidden_states: Optional[bool] = None,
1578
+ return_dict: Optional[bool] = None,
1579
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1580
+ r"""
1581
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1582
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1583
+ """
1584
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1585
+
1586
+ outputs = self.mobilebert(
1587
+ input_ids,
1588
+ attention_mask=attention_mask,
1589
+ token_type_ids=token_type_ids,
1590
+ position_ids=position_ids,
1591
+ head_mask=head_mask,
1592
+ inputs_embeds=inputs_embeds,
1593
+ output_attentions=output_attentions,
1594
+ output_hidden_states=output_hidden_states,
1595
+ return_dict=return_dict,
1596
+ )
1597
+
1598
+ sequence_output = outputs[0]
1599
+
1600
+ sequence_output = self.dropout(sequence_output)
1601
+ logits = self.classifier(sequence_output)
1602
+
1603
+ loss = None
1604
+ if labels is not None:
1605
+ loss_fct = CrossEntropyLoss()
1606
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1607
+
1608
+ if not return_dict:
1609
+ output = (logits,) + outputs[2:]
1610
+ return ((loss,) + output) if loss is not None else output
1611
+
1612
+ return TokenClassifierOutput(
1613
+ loss=loss,
1614
+ logits=logits,
1615
+ hidden_states=outputs.hidden_states,
1616
+ attentions=outputs.attentions,
1617
+ )
env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/modeling_tf_mobilebert.py ADDED
@@ -0,0 +1,1972 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ TF 2.0 MobileBERT model."""
17
+
18
+
19
+ from __future__ import annotations
20
+
21
+ import warnings
22
+ from dataclasses import dataclass
23
+ from typing import Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import tensorflow as tf
27
+
28
+ from ...activations_tf import get_tf_activation
29
+ from ...modeling_tf_outputs import (
30
+ TFBaseModelOutput,
31
+ TFBaseModelOutputWithPooling,
32
+ TFMaskedLMOutput,
33
+ TFMultipleChoiceModelOutput,
34
+ TFNextSentencePredictorOutput,
35
+ TFQuestionAnsweringModelOutput,
36
+ TFSequenceClassifierOutput,
37
+ TFTokenClassifierOutput,
38
+ )
39
+ from ...modeling_tf_utils import (
40
+ TFMaskedLanguageModelingLoss,
41
+ TFModelInputType,
42
+ TFMultipleChoiceLoss,
43
+ TFNextSentencePredictionLoss,
44
+ TFPreTrainedModel,
45
+ TFQuestionAnsweringLoss,
46
+ TFSequenceClassificationLoss,
47
+ TFTokenClassificationLoss,
48
+ get_initializer,
49
+ keras,
50
+ keras_serializable,
51
+ unpack_inputs,
52
+ )
53
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
54
+ from ...utils import (
55
+ ModelOutput,
56
+ add_code_sample_docstrings,
57
+ add_start_docstrings,
58
+ add_start_docstrings_to_model_forward,
59
+ logging,
60
+ replace_return_docstrings,
61
+ )
62
+ from .configuration_mobilebert import MobileBertConfig
63
+
64
+
65
+ logger = logging.get_logger(__name__)
66
+
67
+ _CHECKPOINT_FOR_DOC = "google/mobilebert-uncased"
68
+ _CONFIG_FOR_DOC = "MobileBertConfig"
69
+
70
+ # TokenClassification docstring
71
+ _CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "vumichien/mobilebert-finetuned-ner"
72
+ _TOKEN_CLASS_EXPECTED_OUTPUT = "['I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC']"
73
+ _TOKEN_CLASS_EXPECTED_LOSS = 0.03
74
+
75
+ # QuestionAnswering docstring
76
+ _CHECKPOINT_FOR_QA = "vumichien/mobilebert-uncased-squad-v2"
77
+ _QA_EXPECTED_OUTPUT = "'a nice puppet'"
78
+ _QA_EXPECTED_LOSS = 3.98
79
+ _QA_TARGET_START_INDEX = 12
80
+ _QA_TARGET_END_INDEX = 13
81
+
82
+ # SequenceClassification docstring
83
+ _CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "vumichien/emo-mobilebert"
84
+ _SEQ_CLASS_EXPECTED_OUTPUT = "'others'"
85
+ _SEQ_CLASS_EXPECTED_LOSS = "4.72"
86
+
87
+ TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
88
+ "google/mobilebert-uncased",
89
+ # See all MobileBERT models at https://huggingface.co/models?filter=mobilebert
90
+ ]
91
+
92
+
93
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPreTrainingLoss
94
+ class TFMobileBertPreTrainingLoss:
95
+ """
96
+ Loss function suitable for BERT-like pretraining, that is, the task of pretraining a language model by combining
97
+ NSP + MLM. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss
98
+ computation.
99
+ """
100
+
101
+ def hf_compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:
102
+ loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE)
103
+
104
+ # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway
105
+ unmasked_lm_losses = loss_fn(y_true=tf.nn.relu(labels["labels"]), y_pred=logits[0])
106
+ # make sure only labels that are not equal to -100
107
+ # are taken into account for the loss computation
108
+ lm_loss_mask = tf.cast(labels["labels"] != -100, dtype=unmasked_lm_losses.dtype)
109
+ masked_lm_losses = unmasked_lm_losses * lm_loss_mask
110
+ reduced_masked_lm_loss = tf.reduce_sum(masked_lm_losses) / tf.reduce_sum(lm_loss_mask)
111
+
112
+ # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway
113
+ unmasked_ns_loss = loss_fn(y_true=tf.nn.relu(labels["next_sentence_label"]), y_pred=logits[1])
114
+ ns_loss_mask = tf.cast(labels["next_sentence_label"] != -100, dtype=unmasked_ns_loss.dtype)
115
+ masked_ns_loss = unmasked_ns_loss * ns_loss_mask
116
+
117
+ reduced_masked_ns_loss = tf.reduce_sum(masked_ns_loss) / tf.reduce_sum(ns_loss_mask)
118
+
119
+ return tf.reshape(reduced_masked_lm_loss + reduced_masked_ns_loss, (1,))
120
+
121
+
122
+ class TFMobileBertIntermediate(keras.layers.Layer):
123
+ def __init__(self, config, **kwargs):
124
+ super().__init__(**kwargs)
125
+
126
+ self.dense = keras.layers.Dense(config.intermediate_size, name="dense")
127
+
128
+ if isinstance(config.hidden_act, str):
129
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
130
+ else:
131
+ self.intermediate_act_fn = config.hidden_act
132
+ self.config = config
133
+
134
+ def call(self, hidden_states):
135
+ hidden_states = self.dense(hidden_states)
136
+ hidden_states = self.intermediate_act_fn(hidden_states)
137
+
138
+ return hidden_states
139
+
140
+ def build(self, input_shape=None):
141
+ if self.built:
142
+ return
143
+ self.built = True
144
+ if getattr(self, "dense", None) is not None:
145
+ with tf.name_scope(self.dense.name):
146
+ self.dense.build([None, None, self.config.true_hidden_size])
147
+
148
+
149
+ class TFLayerNorm(keras.layers.LayerNormalization):
150
+ def __init__(self, feat_size, *args, **kwargs):
151
+ self.feat_size = feat_size
152
+ super().__init__(*args, **kwargs)
153
+
154
+ def build(self, input_shape=None):
155
+ super().build([None, None, self.feat_size])
156
+
157
+
158
+ class TFNoNorm(keras.layers.Layer):
159
+ def __init__(self, feat_size, epsilon=None, **kwargs):
160
+ super().__init__(**kwargs)
161
+ self.feat_size = feat_size
162
+
163
+ def build(self, input_shape):
164
+ self.bias = self.add_weight("bias", shape=[self.feat_size], initializer="zeros")
165
+ self.weight = self.add_weight("weight", shape=[self.feat_size], initializer="ones")
166
+ super().build(input_shape)
167
+
168
+ def call(self, inputs: tf.Tensor):
169
+ return inputs * self.weight + self.bias
170
+
171
+
172
+ NORM2FN = {"layer_norm": TFLayerNorm, "no_norm": TFNoNorm}
173
+
174
+
175
+ class TFMobileBertEmbeddings(keras.layers.Layer):
176
+ """Construct the embeddings from word, position and token_type embeddings."""
177
+
178
+ def __init__(self, config, **kwargs):
179
+ super().__init__(**kwargs)
180
+
181
+ self.trigram_input = config.trigram_input
182
+ self.embedding_size = config.embedding_size
183
+ self.config = config
184
+ self.hidden_size = config.hidden_size
185
+ self.max_position_embeddings = config.max_position_embeddings
186
+ self.initializer_range = config.initializer_range
187
+ self.embedding_transformation = keras.layers.Dense(config.hidden_size, name="embedding_transformation")
188
+
189
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
190
+ # any TensorFlow checkpoint file
191
+ self.LayerNorm = NORM2FN[config.normalization_type](
192
+ config.hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
193
+ )
194
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
195
+ self.embedded_input_size = self.embedding_size * (3 if self.trigram_input else 1)
196
+
197
+ def build(self, input_shape=None):
198
+ with tf.name_scope("word_embeddings"):
199
+ self.weight = self.add_weight(
200
+ name="weight",
201
+ shape=[self.config.vocab_size, self.embedding_size],
202
+ initializer=get_initializer(initializer_range=self.initializer_range),
203
+ )
204
+
205
+ with tf.name_scope("token_type_embeddings"):
206
+ self.token_type_embeddings = self.add_weight(
207
+ name="embeddings",
208
+ shape=[self.config.type_vocab_size, self.hidden_size],
209
+ initializer=get_initializer(initializer_range=self.initializer_range),
210
+ )
211
+
212
+ with tf.name_scope("position_embeddings"):
213
+ self.position_embeddings = self.add_weight(
214
+ name="embeddings",
215
+ shape=[self.max_position_embeddings, self.hidden_size],
216
+ initializer=get_initializer(initializer_range=self.initializer_range),
217
+ )
218
+
219
+ if self.built:
220
+ return
221
+ self.built = True
222
+ if getattr(self, "embedding_transformation", None) is not None:
223
+ with tf.name_scope(self.embedding_transformation.name):
224
+ self.embedding_transformation.build([None, None, self.embedded_input_size])
225
+ if getattr(self, "LayerNorm", None) is not None:
226
+ with tf.name_scope(self.LayerNorm.name):
227
+ self.LayerNorm.build(None)
228
+
229
+ def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, training=False):
230
+ """
231
+ Applies embedding based on inputs tensor.
232
+
233
+ Returns:
234
+ final_embeddings (`tf.Tensor`): output embedding tensor.
235
+ """
236
+ assert not (input_ids is None and inputs_embeds is None)
237
+
238
+ if input_ids is not None:
239
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
240
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
241
+
242
+ input_shape = shape_list(inputs_embeds)[:-1]
243
+
244
+ if token_type_ids is None:
245
+ token_type_ids = tf.fill(dims=input_shape, value=0)
246
+
247
+ if self.trigram_input:
248
+ # From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited
249
+ # Devices (https://arxiv.org/abs/2004.02984)
250
+ #
251
+ # The embedding table in BERT models accounts for a substantial proportion of model size. To compress
252
+ # the embedding layer, we reduce the embedding dimension to 128 in MobileBERT.
253
+ # Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512
254
+ # dimensional output.
255
+ inputs_embeds = tf.concat(
256
+ [
257
+ tf.pad(inputs_embeds[:, 1:], ((0, 0), (0, 1), (0, 0))),
258
+ inputs_embeds,
259
+ tf.pad(inputs_embeds[:, :-1], ((0, 0), (1, 0), (0, 0))),
260
+ ],
261
+ axis=2,
262
+ )
263
+
264
+ if self.trigram_input or self.embedding_size != self.hidden_size:
265
+ inputs_embeds = self.embedding_transformation(inputs_embeds)
266
+
267
+ if position_ids is None:
268
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
269
+
270
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
271
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
272
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
273
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
274
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
275
+
276
+ return final_embeddings
277
+
278
+
279
+ class TFMobileBertSelfAttention(keras.layers.Layer):
280
+ def __init__(self, config, **kwargs):
281
+ super().__init__(**kwargs)
282
+ if config.hidden_size % config.num_attention_heads != 0:
283
+ raise ValueError(
284
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
285
+ f"heads ({config.num_attention_heads}"
286
+ )
287
+
288
+ self.num_attention_heads = config.num_attention_heads
289
+ self.output_attentions = config.output_attentions
290
+ assert config.hidden_size % config.num_attention_heads == 0
291
+ self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads)
292
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
293
+
294
+ self.query = keras.layers.Dense(
295
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
296
+ )
297
+ self.key = keras.layers.Dense(
298
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
299
+ )
300
+ self.value = keras.layers.Dense(
301
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
302
+ )
303
+
304
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
305
+ self.config = config
306
+
307
+ def transpose_for_scores(self, x, batch_size):
308
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
309
+ x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
310
+ return tf.transpose(x, perm=[0, 2, 1, 3])
311
+
312
+ def call(
313
+ self, query_tensor, key_tensor, value_tensor, attention_mask, head_mask, output_attentions, training=False
314
+ ):
315
+ batch_size = shape_list(attention_mask)[0]
316
+ mixed_query_layer = self.query(query_tensor)
317
+ mixed_key_layer = self.key(key_tensor)
318
+ mixed_value_layer = self.value(value_tensor)
319
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
320
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
321
+ value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
322
+
323
+ # Take the dot product between "query" and "key" to get the raw attention scores.
324
+ attention_scores = tf.matmul(
325
+ query_layer, key_layer, transpose_b=True
326
+ ) # (batch size, num_heads, seq_len_q, seq_len_k)
327
+ dk = tf.cast(shape_list(key_layer)[-1], dtype=attention_scores.dtype) # scale attention_scores
328
+ attention_scores = attention_scores / tf.math.sqrt(dk)
329
+
330
+ if attention_mask is not None:
331
+ # Apply the attention mask is (precomputed for all layers in TFMobileBertModel call() function)
332
+ attention_mask = tf.cast(attention_mask, dtype=attention_scores.dtype)
333
+ attention_scores = attention_scores + attention_mask
334
+
335
+ # Normalize the attention scores to probabilities.
336
+ attention_probs = stable_softmax(attention_scores, axis=-1)
337
+
338
+ # This is actually dropping out entire tokens to attend to, which might
339
+ # seem a bit unusual, but is taken from the original Transformer paper.
340
+ attention_probs = self.dropout(attention_probs, training=training)
341
+
342
+ # Mask heads if we want to
343
+ if head_mask is not None:
344
+ attention_probs = attention_probs * head_mask
345
+
346
+ context_layer = tf.matmul(attention_probs, value_layer)
347
+
348
+ context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
349
+ context_layer = tf.reshape(
350
+ context_layer, (batch_size, -1, self.all_head_size)
351
+ ) # (batch_size, seq_len_q, all_head_size)
352
+
353
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
354
+
355
+ return outputs
356
+
357
+ def build(self, input_shape=None):
358
+ if self.built:
359
+ return
360
+ self.built = True
361
+ if getattr(self, "query", None) is not None:
362
+ with tf.name_scope(self.query.name):
363
+ self.query.build([None, None, self.config.true_hidden_size])
364
+ if getattr(self, "key", None) is not None:
365
+ with tf.name_scope(self.key.name):
366
+ self.key.build([None, None, self.config.true_hidden_size])
367
+ if getattr(self, "value", None) is not None:
368
+ with tf.name_scope(self.value.name):
369
+ self.value.build(
370
+ [
371
+ None,
372
+ None,
373
+ self.config.true_hidden_size
374
+ if self.config.use_bottleneck_attention
375
+ else self.config.hidden_size,
376
+ ]
377
+ )
378
+
379
+
380
+ class TFMobileBertSelfOutput(keras.layers.Layer):
381
+ def __init__(self, config, **kwargs):
382
+ super().__init__(**kwargs)
383
+ self.use_bottleneck = config.use_bottleneck
384
+ self.dense = keras.layers.Dense(
385
+ config.true_hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
386
+ )
387
+ self.LayerNorm = NORM2FN[config.normalization_type](
388
+ config.true_hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
389
+ )
390
+ if not self.use_bottleneck:
391
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
392
+ self.config = config
393
+
394
+ def call(self, hidden_states, residual_tensor, training=False):
395
+ hidden_states = self.dense(hidden_states)
396
+ if not self.use_bottleneck:
397
+ hidden_states = self.dropout(hidden_states, training=training)
398
+ hidden_states = self.LayerNorm(hidden_states + residual_tensor)
399
+ return hidden_states
400
+
401
+ def build(self, input_shape=None):
402
+ if self.built:
403
+ return
404
+ self.built = True
405
+ if getattr(self, "dense", None) is not None:
406
+ with tf.name_scope(self.dense.name):
407
+ self.dense.build([None, None, self.config.true_hidden_size])
408
+ if getattr(self, "LayerNorm", None) is not None:
409
+ with tf.name_scope(self.LayerNorm.name):
410
+ self.LayerNorm.build(None)
411
+
412
+
413
+ class TFMobileBertAttention(keras.layers.Layer):
414
+ def __init__(self, config, **kwargs):
415
+ super().__init__(**kwargs)
416
+ self.self = TFMobileBertSelfAttention(config, name="self")
417
+ self.mobilebert_output = TFMobileBertSelfOutput(config, name="output")
418
+
419
+ def prune_heads(self, heads):
420
+ raise NotImplementedError
421
+
422
+ def call(
423
+ self,
424
+ query_tensor,
425
+ key_tensor,
426
+ value_tensor,
427
+ layer_input,
428
+ attention_mask,
429
+ head_mask,
430
+ output_attentions,
431
+ training=False,
432
+ ):
433
+ self_outputs = self.self(
434
+ query_tensor, key_tensor, value_tensor, attention_mask, head_mask, output_attentions, training=training
435
+ )
436
+
437
+ attention_output = self.mobilebert_output(self_outputs[0], layer_input, training=training)
438
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
439
+ return outputs
440
+
441
+ def build(self, input_shape=None):
442
+ if self.built:
443
+ return
444
+ self.built = True
445
+ if getattr(self, "self", None) is not None:
446
+ with tf.name_scope(self.self.name):
447
+ self.self.build(None)
448
+ if getattr(self, "mobilebert_output", None) is not None:
449
+ with tf.name_scope(self.mobilebert_output.name):
450
+ self.mobilebert_output.build(None)
451
+
452
+
453
+ class TFOutputBottleneck(keras.layers.Layer):
454
+ def __init__(self, config, **kwargs):
455
+ super().__init__(**kwargs)
456
+ self.dense = keras.layers.Dense(config.hidden_size, name="dense")
457
+ self.LayerNorm = NORM2FN[config.normalization_type](
458
+ config.hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
459
+ )
460
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
461
+ self.config = config
462
+
463
+ def call(self, hidden_states, residual_tensor, training=False):
464
+ layer_outputs = self.dense(hidden_states)
465
+ layer_outputs = self.dropout(layer_outputs, training=training)
466
+ layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
467
+ return layer_outputs
468
+
469
+ def build(self, input_shape=None):
470
+ if self.built:
471
+ return
472
+ self.built = True
473
+ if getattr(self, "dense", None) is not None:
474
+ with tf.name_scope(self.dense.name):
475
+ self.dense.build([None, None, self.config.true_hidden_size])
476
+ if getattr(self, "LayerNorm", None) is not None:
477
+ with tf.name_scope(self.LayerNorm.name):
478
+ self.LayerNorm.build(None)
479
+
480
+
481
+ class TFMobileBertOutput(keras.layers.Layer):
482
+ def __init__(self, config, **kwargs):
483
+ super().__init__(**kwargs)
484
+ self.use_bottleneck = config.use_bottleneck
485
+ self.dense = keras.layers.Dense(
486
+ config.true_hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
487
+ )
488
+ self.LayerNorm = NORM2FN[config.normalization_type](
489
+ config.true_hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
490
+ )
491
+ if not self.use_bottleneck:
492
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
493
+ else:
494
+ self.bottleneck = TFOutputBottleneck(config, name="bottleneck")
495
+ self.config = config
496
+
497
+ def call(self, hidden_states, residual_tensor_1, residual_tensor_2, training=False):
498
+ hidden_states = self.dense(hidden_states)
499
+ if not self.use_bottleneck:
500
+ hidden_states = self.dropout(hidden_states, training=training)
501
+ hidden_states = self.LayerNorm(hidden_states + residual_tensor_1)
502
+ else:
503
+ hidden_states = self.LayerNorm(hidden_states + residual_tensor_1)
504
+ hidden_states = self.bottleneck(hidden_states, residual_tensor_2)
505
+ return hidden_states
506
+
507
+ def build(self, input_shape=None):
508
+ if self.built:
509
+ return
510
+ self.built = True
511
+ if getattr(self, "dense", None) is not None:
512
+ with tf.name_scope(self.dense.name):
513
+ self.dense.build([None, None, self.config.intermediate_size])
514
+ if getattr(self, "LayerNorm", None) is not None:
515
+ with tf.name_scope(self.LayerNorm.name):
516
+ self.LayerNorm.build(None)
517
+ if getattr(self, "bottleneck", None) is not None:
518
+ with tf.name_scope(self.bottleneck.name):
519
+ self.bottleneck.build(None)
520
+
521
+
522
+ class TFBottleneckLayer(keras.layers.Layer):
523
+ def __init__(self, config, **kwargs):
524
+ super().__init__(**kwargs)
525
+ self.dense = keras.layers.Dense(config.intra_bottleneck_size, name="dense")
526
+ self.LayerNorm = NORM2FN[config.normalization_type](
527
+ config.intra_bottleneck_size, epsilon=config.layer_norm_eps, name="LayerNorm"
528
+ )
529
+ self.config = config
530
+
531
+ def call(self, inputs):
532
+ hidden_states = self.dense(inputs)
533
+ hidden_states = self.LayerNorm(hidden_states)
534
+ return hidden_states
535
+
536
+ def build(self, input_shape=None):
537
+ if self.built:
538
+ return
539
+ self.built = True
540
+ if getattr(self, "dense", None) is not None:
541
+ with tf.name_scope(self.dense.name):
542
+ self.dense.build([None, None, self.config.hidden_size])
543
+ if getattr(self, "LayerNorm", None) is not None:
544
+ with tf.name_scope(self.LayerNorm.name):
545
+ self.LayerNorm.build(None)
546
+
547
+
548
+ class TFBottleneck(keras.layers.Layer):
549
+ def __init__(self, config, **kwargs):
550
+ super().__init__(**kwargs)
551
+ self.key_query_shared_bottleneck = config.key_query_shared_bottleneck
552
+ self.use_bottleneck_attention = config.use_bottleneck_attention
553
+ self.bottleneck_input = TFBottleneckLayer(config, name="input")
554
+ if self.key_query_shared_bottleneck:
555
+ self.attention = TFBottleneckLayer(config, name="attention")
556
+
557
+ def call(self, hidden_states):
558
+ # This method can return three different tuples of values. These different values make use of bottlenecks,
559
+ # which are linear layers used to project the hidden states to a lower-dimensional vector, reducing memory
560
+ # usage. These linear layers have weights that are learned during training.
561
+ #
562
+ # If `config.use_bottleneck_attention`, it will return the result of the bottleneck layer four times for the
563
+ # key, query, value, and "layer input" to be used by the attention layer.
564
+ # This bottleneck is used to project the hidden states. This last layer input will be used as a residual tensor
565
+ # in the attention self output, after the attention scores have been computed.
566
+ #
567
+ # If not `config.use_bottleneck_attention` and `config.key_query_shared_bottleneck`, this will return
568
+ # four values, three of which have been passed through a bottleneck: the query and key, passed through the same
569
+ # bottleneck, and the residual layer to be applied in the attention self output, through another bottleneck.
570
+ #
571
+ # Finally, in the last case, the values for the query, key and values are the hidden states without bottleneck,
572
+ # and the residual layer will be this value passed through a bottleneck.
573
+
574
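+ # Illustrative shapes (assuming the usual hidden_size=512 and intra_bottleneck_size=128 of
+ # google/mobilebert-uncased): hidden_states is (batch, seq_len, 512) and each TFBottleneckLayer
+ # projects it to (batch, seq_len, 128), so whichever tensors below come from a bottleneck are the
+ # narrower 128-dimensional ones.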
+ bottlenecked_hidden_states = self.bottleneck_input(hidden_states)
575
+ if self.use_bottleneck_attention:
576
+ return (bottlenecked_hidden_states,) * 4
577
+ elif self.key_query_shared_bottleneck:
578
+ shared_attention_input = self.attention(hidden_states)
579
+ return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states)
580
+ else:
581
+ return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states)
582
+
583
+ def build(self, input_shape=None):
584
+ if self.built:
585
+ return
586
+ self.built = True
587
+ if getattr(self, "bottleneck_input", None) is not None:
588
+ with tf.name_scope(self.bottleneck_input.name):
589
+ self.bottleneck_input.build(None)
590
+ if getattr(self, "attention", None) is not None:
591
+ with tf.name_scope(self.attention.name):
592
+ self.attention.build(None)
593
+
594
+
595
+ class TFFFNOutput(keras.layers.Layer):
596
+ def __init__(self, config, **kwargs):
597
+ super().__init__(**kwargs)
598
+ self.dense = keras.layers.Dense(config.true_hidden_size, name="dense")
599
+ self.LayerNorm = NORM2FN[config.normalization_type](
600
+ config.true_hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
601
+ )
602
+ self.config = config
603
+
604
+ def call(self, hidden_states, residual_tensor):
605
+ hidden_states = self.dense(hidden_states)
606
+ hidden_states = self.LayerNorm(hidden_states + residual_tensor)
607
+ return hidden_states
608
+
609
+ def build(self, input_shape=None):
610
+ if self.built:
611
+ return
612
+ self.built = True
613
+ if getattr(self, "dense", None) is not None:
614
+ with tf.name_scope(self.dense.name):
615
+ self.dense.build([None, None, self.config.intermediate_size])
616
+ if getattr(self, "LayerNorm", None) is not None:
617
+ with tf.name_scope(self.LayerNorm.name):
618
+ self.LayerNorm.build(None)
619
+
620
+
621
+ class TFFFNLayer(keras.layers.Layer):
622
+ def __init__(self, config, **kwargs):
623
+ super().__init__(**kwargs)
624
+ self.intermediate = TFMobileBertIntermediate(config, name="intermediate")
625
+ self.mobilebert_output = TFFFNOutput(config, name="output")
626
+
627
+ def call(self, hidden_states):
628
+ intermediate_output = self.intermediate(hidden_states)
629
+ layer_outputs = self.mobilebert_output(intermediate_output, hidden_states)
630
+ return layer_outputs
631
+
632
+ def build(self, input_shape=None):
633
+ if self.built:
634
+ return
635
+ self.built = True
636
+ if getattr(self, "intermediate", None) is not None:
637
+ with tf.name_scope(self.intermediate.name):
638
+ self.intermediate.build(None)
639
+ if getattr(self, "mobilebert_output", None) is not None:
640
+ with tf.name_scope(self.mobilebert_output.name):
641
+ self.mobilebert_output.build(None)
642
+
643
+
644
+ class TFMobileBertLayer(keras.layers.Layer):
645
+ def __init__(self, config, **kwargs):
646
+ super().__init__(**kwargs)
647
+ self.use_bottleneck = config.use_bottleneck
648
+ self.num_feedforward_networks = config.num_feedforward_networks
649
+ self.attention = TFMobileBertAttention(config, name="attention")
650
+ self.intermediate = TFMobileBertIntermediate(config, name="intermediate")
651
+ self.mobilebert_output = TFMobileBertOutput(config, name="output")
652
+
653
+ if self.use_bottleneck:
654
+ self.bottleneck = TFBottleneck(config, name="bottleneck")
655
+ if config.num_feedforward_networks > 1:
656
+ self.ffn = [TFFFNLayer(config, name=f"ffn.{i}") for i in range(config.num_feedforward_networks - 1)]
657
+
658
+ def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
659
+ if self.use_bottleneck:
660
+ query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states)
661
+ else:
662
+ query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4
663
+
664
+ attention_outputs = self.attention(
665
+ query_tensor,
666
+ key_tensor,
667
+ value_tensor,
668
+ layer_input,
669
+ attention_mask,
670
+ head_mask,
671
+ output_attentions,
672
+ training=training,
673
+ )
674
+
675
+ attention_output = attention_outputs[0]
676
+ s = (attention_output,)
677
+
678
+ if self.num_feedforward_networks != 1:
679
+ for i, ffn_module in enumerate(self.ffn):
680
+ attention_output = ffn_module(attention_output)
681
+ s += (attention_output,)
682
+
683
+ intermediate_output = self.intermediate(attention_output)
684
+ layer_output = self.mobilebert_output(intermediate_output, attention_output, hidden_states, training=training)
685
+
686
+ outputs = (
687
+ (layer_output,)
688
+ + attention_outputs[1:]
689
+ + (
690
+ tf.constant(0),
691
+ query_tensor,
692
+ key_tensor,
693
+ value_tensor,
694
+ layer_input,
695
+ attention_output,
696
+ intermediate_output,
697
+ )
698
+ + s
699
+ ) # add attentions if we output them
700
+
701
+ return outputs
702
+
703
+ def build(self, input_shape=None):
704
+ if self.built:
705
+ return
706
+ self.built = True
707
+ if getattr(self, "attention", None) is not None:
708
+ with tf.name_scope(self.attention.name):
709
+ self.attention.build(None)
710
+ if getattr(self, "intermediate", None) is not None:
711
+ with tf.name_scope(self.intermediate.name):
712
+ self.intermediate.build(None)
713
+ if getattr(self, "mobilebert_output", None) is not None:
714
+ with tf.name_scope(self.mobilebert_output.name):
715
+ self.mobilebert_output.build(None)
716
+ if getattr(self, "bottleneck", None) is not None:
717
+ with tf.name_scope(self.bottleneck.name):
718
+ self.bottleneck.build(None)
719
+ if getattr(self, "ffn", None) is not None:
720
+ for layer in self.ffn:
721
+ with tf.name_scope(layer.name):
722
+ layer.build(None)
723
+
724
+
725
+ class TFMobileBertEncoder(keras.layers.Layer):
726
+ def __init__(self, config, **kwargs):
727
+ super().__init__(**kwargs)
728
+ self.output_attentions = config.output_attentions
729
+ self.output_hidden_states = config.output_hidden_states
730
+ self.layer = [TFMobileBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
731
+
732
+ def call(
733
+ self,
734
+ hidden_states,
735
+ attention_mask,
736
+ head_mask,
737
+ output_attentions,
738
+ output_hidden_states,
739
+ return_dict,
740
+ training=False,
741
+ ):
742
+ all_hidden_states = () if output_hidden_states else None
743
+ all_attentions = () if output_attentions else None
744
+ for i, layer_module in enumerate(self.layer):
745
+ if output_hidden_states:
746
+ all_hidden_states = all_hidden_states + (hidden_states,)
747
+
748
+ layer_outputs = layer_module(
749
+ hidden_states, attention_mask, head_mask[i], output_attentions, training=training
750
+ )
751
+
752
+ hidden_states = layer_outputs[0]
753
+
754
+ if output_attentions:
755
+ all_attentions = all_attentions + (layer_outputs[1],)
756
+
757
+ # Add last layer
758
+ if output_hidden_states:
759
+ all_hidden_states = all_hidden_states + (hidden_states,)
760
+
761
+ if not return_dict:
762
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
763
+ return TFBaseModelOutput(
764
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
765
+ )
766
+
767
+ def build(self, input_shape=None):
768
+ if self.built:
769
+ return
770
+ self.built = True
771
+ if getattr(self, "layer", None) is not None:
772
+ for layer in self.layer:
773
+ with tf.name_scope(layer.name):
774
+ layer.build(None)
775
+
776
+
777
+ class TFMobileBertPooler(keras.layers.Layer):
778
+ def __init__(self, config, **kwargs):
779
+ super().__init__(**kwargs)
780
+ self.do_activate = config.classifier_activation
781
+ if self.do_activate:
782
+ self.dense = keras.layers.Dense(
783
+ config.hidden_size,
784
+ kernel_initializer=get_initializer(config.initializer_range),
785
+ activation="tanh",
786
+ name="dense",
787
+ )
788
+ self.config = config
789
+
790
+ def call(self, hidden_states):
791
+ # We "pool" the model by simply taking the hidden state corresponding
792
+ # to the first token.
793
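+ # first_token_tensor has shape (batch_size, hidden_size): the representation of the first ([CLS]) token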
+ first_token_tensor = hidden_states[:, 0]
794
+ if not self.do_activate:
795
+ return first_token_tensor
796
+ else:
797
+ pooled_output = self.dense(first_token_tensor)
798
+ return pooled_output
799
+
800
+ def build(self, input_shape=None):
801
+ if self.built:
802
+ return
803
+ self.built = True
804
+ if getattr(self, "dense", None) is not None:
805
+ with tf.name_scope(self.dense.name):
806
+ self.dense.build([None, None, self.config.hidden_size])
807
+
808
+
809
+ class TFMobileBertPredictionHeadTransform(keras.layers.Layer):
810
+ def __init__(self, config, **kwargs):
811
+ super().__init__(**kwargs)
812
+ self.dense = keras.layers.Dense(
813
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
814
+ )
815
+ if isinstance(config.hidden_act, str):
816
+ self.transform_act_fn = get_tf_activation(config.hidden_act)
817
+ else:
818
+ self.transform_act_fn = config.hidden_act
819
+ self.LayerNorm = NORM2FN["layer_norm"](config.hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm")
820
+ self.config = config
821
+
822
+ def call(self, hidden_states):
823
+ hidden_states = self.dense(hidden_states)
824
+ hidden_states = self.transform_act_fn(hidden_states)
825
+ hidden_states = self.LayerNorm(hidden_states)
826
+ return hidden_states
827
+
828
+ def build(self, input_shape=None):
829
+ if self.built:
830
+ return
831
+ self.built = True
832
+ if getattr(self, "dense", None) is not None:
833
+ with tf.name_scope(self.dense.name):
834
+ self.dense.build([None, None, self.config.hidden_size])
835
+ if getattr(self, "LayerNorm", None) is not None:
836
+ with tf.name_scope(self.LayerNorm.name):
837
+ self.LayerNorm.build(None)
838
+
839
+
840
+ class TFMobileBertLMPredictionHead(keras.layers.Layer):
841
+ def __init__(self, config, **kwargs):
842
+ super().__init__(**kwargs)
843
+ self.transform = TFMobileBertPredictionHeadTransform(config, name="transform")
844
+ self.config = config
845
+
846
+ def build(self, input_shape=None):
847
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
848
+ self.dense = self.add_weight(
849
+ shape=(self.config.hidden_size - self.config.embedding_size, self.config.vocab_size),
850
+ initializer="zeros",
851
+ trainable=True,
852
+ name="dense/weight",
853
+ )
854
+ self.decoder = self.add_weight(
855
+ shape=(self.config.vocab_size, self.config.embedding_size),
856
+ initializer="zeros",
857
+ trainable=True,
858
+ name="decoder/weight",
859
+ )
860
+
861
+ if self.built:
862
+ return
863
+ self.built = True
864
+ if getattr(self, "transform", None) is not None:
865
+ with tf.name_scope(self.transform.name):
866
+ self.transform.build(None)
867
+
868
+ def get_output_embeddings(self):
869
+ return self
870
+
871
+ def set_output_embeddings(self, value):
872
+ self.decoder = value
873
+ self.config.vocab_size = shape_list(value)[0]
874
+
875
+ def get_bias(self):
876
+ return {"bias": self.bias}
877
+
878
+ def set_bias(self, value):
879
+ self.bias = value["bias"]
880
+ self.config.vocab_size = shape_list(value["bias"])[0]
881
+
882
+ def call(self, hidden_states):
883
+ hidden_states = self.transform(hidden_states)
884
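+ # decoder has shape (vocab_size, embedding_size) and dense has shape (hidden_size - embedding_size, vocab_size);
+ # concatenating the transposed decoder with dense yields a (hidden_size, vocab_size) projection matrix.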
+ hidden_states = tf.matmul(hidden_states, tf.concat([tf.transpose(self.decoder), self.dense], axis=0))
885
+ hidden_states = hidden_states + self.bias
886
+ return hidden_states
887
+
888
+
889
+ class TFMobileBertMLMHead(keras.layers.Layer):
890
+ def __init__(self, config, **kwargs):
891
+ super().__init__(**kwargs)
892
+ self.predictions = TFMobileBertLMPredictionHead(config, name="predictions")
893
+
894
+ def call(self, sequence_output):
895
+ prediction_scores = self.predictions(sequence_output)
896
+ return prediction_scores
897
+
898
+ def build(self, input_shape=None):
899
+ if self.built:
900
+ return
901
+ self.built = True
902
+ if getattr(self, "predictions", None) is not None:
903
+ with tf.name_scope(self.predictions.name):
904
+ self.predictions.build(None)
905
+
906
+
907
+ @keras_serializable
908
+ class TFMobileBertMainLayer(keras.layers.Layer):
909
+ config_class = MobileBertConfig
910
+
911
+ def __init__(self, config, add_pooling_layer=True, **kwargs):
912
+ super().__init__(**kwargs)
913
+
914
+ self.config = config
915
+ self.num_hidden_layers = config.num_hidden_layers
916
+ self.output_attentions = config.output_attentions
917
+ self.output_hidden_states = config.output_hidden_states
918
+ self.return_dict = config.use_return_dict
919
+
920
+ self.embeddings = TFMobileBertEmbeddings(config, name="embeddings")
921
+ self.encoder = TFMobileBertEncoder(config, name="encoder")
922
+ self.pooler = TFMobileBertPooler(config, name="pooler") if add_pooling_layer else None
923
+
924
+ def get_input_embeddings(self):
925
+ return self.embeddings
926
+
927
+ def set_input_embeddings(self, value):
928
+ self.embeddings.weight = value
929
+ self.embeddings.vocab_size = shape_list(value)[0]
930
+
931
+ def _prune_heads(self, heads_to_prune):
932
+ """
933
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
934
+ class PreTrainedModel
935
+ """
936
+ raise NotImplementedError
937
+
938
+ @unpack_inputs
939
+ def call(
940
+ self,
941
+ input_ids=None,
942
+ attention_mask=None,
943
+ token_type_ids=None,
944
+ position_ids=None,
945
+ head_mask=None,
946
+ inputs_embeds=None,
947
+ output_attentions=None,
948
+ output_hidden_states=None,
949
+ return_dict=None,
950
+ training=False,
951
+ ):
952
+ if input_ids is not None and inputs_embeds is not None:
953
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
954
+ elif input_ids is not None:
955
+ input_shape = shape_list(input_ids)
956
+ elif inputs_embeds is not None:
957
+ input_shape = shape_list(inputs_embeds)[:-1]
958
+ else:
959
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
960
+
961
+ if attention_mask is None:
962
+ attention_mask = tf.fill(input_shape, 1)
963
+
964
+ if token_type_ids is None:
965
+ token_type_ids = tf.fill(input_shape, 0)
966
+
967
+ embedding_output = self.embeddings(input_ids, position_ids, token_type_ids, inputs_embeds, training=training)
968
+
969
+ # We create a 3D attention mask from a 2D tensor mask.
970
+ # Sizes are [batch_size, 1, 1, to_seq_length]
971
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
972
+ # this attention mask is simpler than the triangular masking of causal attention
973
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
974
+ extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
975
+
976
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
977
+ # masked positions, this operation will create a tensor which is 0.0 for
978
+ # positions we want to attend and -10000.0 for masked positions.
979
+ # Since we are adding it to the raw scores before the softmax, this is
980
+ # effectively the same as removing these entirely.
981
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
982
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
983
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
984
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
985
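+ # For illustration: an attention_mask row of [1, 1, 0] becomes [0.0, 0.0, -10000.0] here, so masked
+ # positions receive a large negative bias that effectively removes them from the softmax.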
+
986
+ # Prepare head mask if needed
987
+ # 1.0 in head_mask indicate we keep the head
988
+ # attention_probs has shape bsz x n_heads x N x N
989
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
990
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
991
+ if head_mask is not None:
992
+ raise NotImplementedError
993
+ else:
994
+ head_mask = [None] * self.num_hidden_layers
995
+
996
+ encoder_outputs = self.encoder(
997
+ embedding_output,
998
+ extended_attention_mask,
999
+ head_mask,
1000
+ output_attentions,
1001
+ output_hidden_states,
1002
+ return_dict,
1003
+ training=training,
1004
+ )
1005
+
1006
+ sequence_output = encoder_outputs[0]
1007
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
1008
+
1009
+ if not return_dict:
1010
+ return (
1011
+ sequence_output,
1012
+ pooled_output,
1013
+ ) + encoder_outputs[1:]
1014
+
1015
+ return TFBaseModelOutputWithPooling(
1016
+ last_hidden_state=sequence_output,
1017
+ pooler_output=pooled_output,
1018
+ hidden_states=encoder_outputs.hidden_states,
1019
+ attentions=encoder_outputs.attentions,
1020
+ )
1021
+
1022
+ def build(self, input_shape=None):
1023
+ if self.built:
1024
+ return
1025
+ self.built = True
1026
+ if getattr(self, "embeddings", None) is not None:
1027
+ with tf.name_scope(self.embeddings.name):
1028
+ self.embeddings.build(None)
1029
+ if getattr(self, "encoder", None) is not None:
1030
+ with tf.name_scope(self.encoder.name):
1031
+ self.encoder.build(None)
1032
+ if getattr(self, "pooler", None) is not None:
1033
+ with tf.name_scope(self.pooler.name):
1034
+ self.pooler.build(None)
1035
+
1036
+
1037
+ class TFMobileBertPreTrainedModel(TFPreTrainedModel):
1038
+ """
1039
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1040
+ models.
1041
+ """
1042
+
1043
+ config_class = MobileBertConfig
1044
+ base_model_prefix = "mobilebert"
1045
+
1046
+
1047
+ @dataclass
1048
+ class TFMobileBertForPreTrainingOutput(ModelOutput):
1049
+ """
1050
+ Output type of [`TFMobileBertForPreTraining`].
1051
+
1052
+ Args:
1053
+ prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
1054
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
1055
+ seq_relationship_logits (`tf.Tensor` of shape `(batch_size, 2)`):
1056
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
1057
+ before SoftMax).
1058
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
1059
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
1060
+ `(batch_size, sequence_length, hidden_size)`.
1061
+
1062
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
1063
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
1064
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
1065
+ sequence_length)`.
1066
+
1067
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
1068
+ heads.
1069
+ """
1070
+
1071
+ loss: tf.Tensor | None = None
1072
+ prediction_logits: tf.Tensor = None
1073
+ seq_relationship_logits: tf.Tensor = None
1074
+ hidden_states: Tuple[tf.Tensor] | None = None
1075
+ attentions: Tuple[tf.Tensor] | None = None
1076
+
1077
+
1078
+ MOBILEBERT_START_DOCSTRING = r"""
1079
+
1080
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
1081
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1082
+ etc.)
1083
+
1084
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
1085
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
1086
+ behavior.
1087
+
1088
+ <Tip>
1089
+
1090
+ TensorFlow models and layers in `transformers` accept two formats as input:
1091
+
1092
+ - having all inputs as keyword arguments (like PyTorch models), or
1093
+ - having all inputs as a list, tuple or dict in the first positional argument.
1094
+
1095
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
1096
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
1097
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
1098
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
1099
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
1100
+ positional argument:
1101
+
1102
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
1103
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
1104
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
1105
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
1106
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
1107
+
1108
+ Note that when creating models and layers with
1109
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
1110
+ about any of this, as you can just pass inputs like you would to any other Python function!
1111
+
1112
+ </Tip>
1113
+
1114
+ Parameters:
1115
+ config ([`MobileBertConfig`]): Model configuration class with all the parameters of the model.
1116
+ Initializing with a config file does not load the weights associated with the model, only the
1117
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1118
+ """
1119
+
1120
+ MOBILEBERT_INPUTS_DOCSTRING = r"""
1121
+ Args:
1122
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
1123
+ Indices of input sequence tokens in the vocabulary.
1124
+
1125
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
1126
+ [`PreTrainedTokenizer.encode`] for details.
1127
+
1128
+ [What are input IDs?](../glossary#input-ids)
1129
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
1130
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1131
+
1132
+ - 1 for tokens that are **not masked**,
1133
+ - 0 for tokens that are **masked**.
1134
+
1135
+ [What are attention masks?](../glossary#attention-mask)
1136
+ token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
1137
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1138
+ 1]`:
1139
+
1140
+ - 0 corresponds to a *sentence A* token,
1141
+ - 1 corresponds to a *sentence B* token.
1142
+
1143
+ [What are token type IDs?](../glossary#token-type-ids)
1144
+ position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
1145
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
1146
+ config.max_position_embeddings - 1]`.
1147
+
1148
+ [What are position IDs?](../glossary#position-ids)
1149
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1150
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
1151
+
1152
+ - 1 indicates the head is **not masked**,
1153
+ - 0 indicates the head is **masked**.
1154
+
1155
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
1156
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1157
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1158
+ model's internal embedding lookup matrix.
1159
+ output_attentions (`bool`, *optional*):
1160
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1161
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1162
+ config will be used instead.
1163
+ output_hidden_states (`bool`, *optional*):
1164
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1165
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1166
+ used instead.
1167
+ return_dict (`bool`, *optional*):
1168
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1169
+ eager mode, in graph mode the value will always be set to True.
1170
+ training (`bool`, *optional*, defaults to `False`):
1171
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1172
+ behaviors between training and evaluation).
1173
+ """
1174
+
1175
+
1176
+ @add_start_docstrings(
1177
+ "The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.",
1178
+ MOBILEBERT_START_DOCSTRING,
1179
+ )
1180
+ class TFMobileBertModel(TFMobileBertPreTrainedModel):
1181
+ def __init__(self, config, *inputs, **kwargs):
1182
+ super().__init__(config, *inputs, **kwargs)
1183
+ self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
1184
+
1185
+ @unpack_inputs
1186
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1187
+ @add_code_sample_docstrings(
1188
+ checkpoint=_CHECKPOINT_FOR_DOC,
1189
+ output_type=TFBaseModelOutputWithPooling,
1190
+ config_class=_CONFIG_FOR_DOC,
1191
+ )
1192
+ def call(
1193
+ self,
1194
+ input_ids: TFModelInputType | None = None,
1195
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1196
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1197
+ position_ids: np.ndarray | tf.Tensor | None = None,
1198
+ head_mask: np.ndarray | tf.Tensor | None = None,
1199
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1200
+ output_attentions: Optional[bool] = None,
1201
+ output_hidden_states: Optional[bool] = None,
1202
+ return_dict: Optional[bool] = None,
1203
+ training: Optional[bool] = False,
1204
+ ) -> Union[Tuple, TFBaseModelOutputWithPooling]:
1205
+ outputs = self.mobilebert(
1206
+ input_ids=input_ids,
1207
+ attention_mask=attention_mask,
1208
+ token_type_ids=token_type_ids,
1209
+ position_ids=position_ids,
1210
+ head_mask=head_mask,
1211
+ inputs_embeds=inputs_embeds,
1212
+ output_attentions=output_attentions,
1213
+ output_hidden_states=output_hidden_states,
1214
+ return_dict=return_dict,
1215
+ training=training,
1216
+ )
1217
+
1218
+ return outputs
1219
+
1220
+ def build(self, input_shape=None):
1221
+ if self.built:
1222
+ return
1223
+ self.built = True
1224
+ if getattr(self, "mobilebert", None) is not None:
1225
+ with tf.name_scope(self.mobilebert.name):
1226
+ self.mobilebert.build(None)
1227
+
1228
+
1229
+ @add_start_docstrings(
1230
+ """
1231
+ MobileBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
1232
+ `next sentence prediction (classification)` head.
1233
+ """,
1234
+ MOBILEBERT_START_DOCSTRING,
1235
+ )
1236
+ class TFMobileBertForPreTraining(TFMobileBertPreTrainedModel, TFMobileBertPreTrainingLoss):
1237
+ def __init__(self, config, *inputs, **kwargs):
1238
+ super().__init__(config, *inputs, **kwargs)
1239
+ self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
1240
+ self.predictions = TFMobileBertMLMHead(config, name="predictions___cls")
1241
+ self.seq_relationship = TFMobileBertOnlyNSPHead(config, name="seq_relationship___cls")
1242
+
1243
+ def get_lm_head(self):
1244
+ return self.predictions.predictions
1245
+
1246
+ def get_prefix_bias_name(self):
1247
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1248
+ return self.name + "/" + self.predictions.name + "/" + self.predictions.predictions.name
1249
+
1250
+ @unpack_inputs
1251
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1252
+ @replace_return_docstrings(output_type=TFMobileBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1253
+ def call(
1254
+ self,
1255
+ input_ids: TFModelInputType | None = None,
1256
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1257
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1258
+ position_ids: np.ndarray | tf.Tensor | None = None,
1259
+ head_mask: np.ndarray | tf.Tensor | None = None,
1260
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1261
+ output_attentions: Optional[bool] = None,
1262
+ output_hidden_states: Optional[bool] = None,
1263
+ return_dict: Optional[bool] = None,
1264
+ labels: np.ndarray | tf.Tensor | None = None,
1265
+ next_sentence_label: np.ndarray | tf.Tensor | None = None,
1266
+ training: Optional[bool] = False,
1267
+ ) -> Union[Tuple, TFMobileBertForPreTrainingOutput]:
1268
+ r"""
1269
+ Return:
1270
+
1271
+ Examples:
1272
+
1273
+ ```python
1274
+ >>> import tensorflow as tf
1275
+ >>> from transformers import AutoTokenizer, TFMobileBertForPreTraining
1276
+
1277
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
1278
+ >>> model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
1279
+ >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
1280
+ >>> outputs = model(input_ids)
1281
+ >>> prediction_scores, seq_relationship_scores = outputs[:2]
1282
+ ```"""
1283
+ outputs = self.mobilebert(
1284
+ input_ids,
1285
+ attention_mask=attention_mask,
1286
+ token_type_ids=token_type_ids,
1287
+ position_ids=position_ids,
1288
+ head_mask=head_mask,
1289
+ inputs_embeds=inputs_embeds,
1290
+ output_attentions=output_attentions,
1291
+ output_hidden_states=output_hidden_states,
1292
+ return_dict=return_dict,
1293
+ training=training,
1294
+ )
1295
+
1296
+ sequence_output, pooled_output = outputs[:2]
1297
+ prediction_scores = self.predictions(sequence_output)
1298
+ seq_relationship_score = self.seq_relationship(pooled_output)
1299
+
1300
+ total_loss = None
1301
+ if labels is not None and next_sentence_label is not None:
1302
+ d_labels = {"labels": labels}
1303
+ d_labels["next_sentence_label"] = next_sentence_label
1304
+ total_loss = self.hf_compute_loss(labels=d_labels, logits=(prediction_scores, seq_relationship_score))
1305
+
1306
+ if not return_dict:
1307
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
1308
+ return ((total_loss,) + output) if total_loss is not None else output
1309
+
1310
+ return TFMobileBertForPreTrainingOutput(
1311
+ loss=total_loss,
1312
+ prediction_logits=prediction_scores,
1313
+ seq_relationship_logits=seq_relationship_score,
1314
+ hidden_states=outputs.hidden_states,
1315
+ attentions=outputs.attentions,
1316
+ )
1317
+
1318
+ def build(self, input_shape=None):
1319
+ if self.built:
1320
+ return
1321
+ self.built = True
1322
+ if getattr(self, "mobilebert", None) is not None:
1323
+ with tf.name_scope(self.mobilebert.name):
1324
+ self.mobilebert.build(None)
1325
+ if getattr(self, "predictions", None) is not None:
1326
+ with tf.name_scope(self.predictions.name):
1327
+ self.predictions.build(None)
1328
+ if getattr(self, "seq_relationship", None) is not None:
1329
+ with tf.name_scope(self.seq_relationship.name):
1330
+ self.seq_relationship.build(None)
1331
+
1332
+ def tf_to_pt_weight_rename(self, tf_weight):
1333
+ if tf_weight == "cls.predictions.decoder.weight":
1334
+ return tf_weight, "mobilebert.embeddings.word_embeddings.weight"
1335
+ else:
1336
+ return (tf_weight,)
1337
+
1338
+
1339
+ @add_start_docstrings("""MobileBert Model with a `language modeling` head on top.""", MOBILEBERT_START_DOCSTRING)
1340
+ class TFMobileBertForMaskedLM(TFMobileBertPreTrainedModel, TFMaskedLanguageModelingLoss):
1341
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1342
+ _keys_to_ignore_on_load_unexpected = [
1343
+ r"pooler",
1344
+ r"seq_relationship___cls",
1345
+ r"cls.seq_relationship",
1346
+ ]
1347
+
1348
+ def __init__(self, config, *inputs, **kwargs):
1349
+ super().__init__(config, *inputs, **kwargs)
1350
+
1351
+ self.mobilebert = TFMobileBertMainLayer(config, add_pooling_layer=False, name="mobilebert")
1352
+ self.predictions = TFMobileBertMLMHead(config, name="predictions___cls")
1353
+
1354
+ def get_lm_head(self):
1355
+ return self.predictions.predictions
1356
+
1357
+ def get_prefix_bias_name(self):
1358
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1359
+ return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name
1360
+
1361
+ @unpack_inputs
1362
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1363
+ @add_code_sample_docstrings(
1364
+ checkpoint=_CHECKPOINT_FOR_DOC,
1365
+ output_type=TFMaskedLMOutput,
1366
+ config_class=_CONFIG_FOR_DOC,
1367
+ expected_output="'paris'",
1368
+ expected_loss=0.57,
1369
+ )
1370
+ def call(
1371
+ self,
1372
+ input_ids: TFModelInputType | None = None,
1373
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1374
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1375
+ position_ids: np.ndarray | tf.Tensor | None = None,
1376
+ head_mask: np.ndarray | tf.Tensor | None = None,
1377
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1378
+ output_attentions: Optional[bool] = None,
1379
+ output_hidden_states: Optional[bool] = None,
1380
+ return_dict: Optional[bool] = None,
1381
+ labels: np.ndarray | tf.Tensor | None = None,
1382
+ training: Optional[bool] = False,
1383
+ ) -> Union[Tuple, TFMaskedLMOutput]:
1384
+ r"""
1385
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1386
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1387
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
1388
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1389
+ """
1390
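+ # For illustration only: labels such as [[-100, 2023, -100]] restrict the MLM loss to position 1;
+ # every position labelled -100 is ignored by the loss.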
+ outputs = self.mobilebert(
1391
+ input_ids,
1392
+ attention_mask=attention_mask,
1393
+ token_type_ids=token_type_ids,
1394
+ position_ids=position_ids,
1395
+ head_mask=head_mask,
1396
+ inputs_embeds=inputs_embeds,
1397
+ output_attentions=output_attentions,
1398
+ output_hidden_states=output_hidden_states,
1399
+ return_dict=return_dict,
1400
+ training=training,
1401
+ )
1402
+ sequence_output = outputs[0]
1403
+ prediction_scores = self.predictions(sequence_output, training=training)
1404
+
1405
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
1406
+
1407
+ if not return_dict:
1408
+ output = (prediction_scores,) + outputs[2:]
1409
+ return ((loss,) + output) if loss is not None else output
1410
+
1411
+ return TFMaskedLMOutput(
1412
+ loss=loss,
1413
+ logits=prediction_scores,
1414
+ hidden_states=outputs.hidden_states,
1415
+ attentions=outputs.attentions,
1416
+ )
1417
+
1418
+ def build(self, input_shape=None):
1419
+ if self.built:
1420
+ return
1421
+ self.built = True
1422
+ if getattr(self, "mobilebert", None) is not None:
1423
+ with tf.name_scope(self.mobilebert.name):
1424
+ self.mobilebert.build(None)
1425
+ if getattr(self, "predictions", None) is not None:
1426
+ with tf.name_scope(self.predictions.name):
1427
+ self.predictions.build(None)
1428
+
1429
+ def tf_to_pt_weight_rename(self, tf_weight):
1430
+ if tf_weight == "cls.predictions.decoder.weight":
1431
+ return tf_weight, "mobilebert.embeddings.word_embeddings.weight"
1432
+ else:
1433
+ return (tf_weight,)
1434
+
1435
+
1436
+ class TFMobileBertOnlyNSPHead(keras.layers.Layer):
1437
+ def __init__(self, config, **kwargs):
1438
+ super().__init__(**kwargs)
1439
+ self.seq_relationship = keras.layers.Dense(2, name="seq_relationship")
1440
+ self.config = config
1441
+
1442
+ def call(self, pooled_output):
1443
+ seq_relationship_score = self.seq_relationship(pooled_output)
1444
+ return seq_relationship_score
1445
+
1446
+ def build(self, input_shape=None):
1447
+ if self.built:
1448
+ return
1449
+ self.built = True
1450
+ if getattr(self, "seq_relationship", None) is not None:
1451
+ with tf.name_scope(self.seq_relationship.name):
1452
+ self.seq_relationship.build([None, None, self.config.hidden_size])
1453
+
1454
+
1455
+ @add_start_docstrings(
1456
+ """MobileBert Model with a `next sentence prediction (classification)` head on top.""",
1457
+ MOBILEBERT_START_DOCSTRING,
1458
+ )
1459
+ class TFMobileBertForNextSentencePrediction(TFMobileBertPreTrainedModel, TFNextSentencePredictionLoss):
1460
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1461
+ _keys_to_ignore_on_load_unexpected = [r"predictions___cls", r"cls.predictions"]
1462
+
1463
+ def __init__(self, config, *inputs, **kwargs):
1464
+ super().__init__(config, *inputs, **kwargs)
1465
+
1466
+ self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
1467
+ self.cls = TFMobileBertOnlyNSPHead(config, name="seq_relationship___cls")
1468
+
1469
+ @unpack_inputs
1470
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1471
+ @replace_return_docstrings(output_type=TFNextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
1472
+ def call(
1473
+ self,
1474
+ input_ids: TFModelInputType | None = None,
1475
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1476
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1477
+ position_ids: np.ndarray | tf.Tensor | None = None,
1478
+ head_mask: np.ndarray | tf.Tensor | None = None,
1479
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1480
+ output_attentions: Optional[bool] = None,
1481
+ output_hidden_states: Optional[bool] = None,
1482
+ return_dict: Optional[bool] = None,
1483
+ next_sentence_label: np.ndarray | tf.Tensor | None = None,
1484
+ training: Optional[bool] = False,
1485
+ ) -> Union[Tuple, TFNextSentencePredictorOutput]:
1486
+ r"""
1487
+ Return:
1488
+
1489
+ Examples:
1490
+
1491
+ ```python
1492
+ >>> import tensorflow as tf
1493
+ >>> from transformers import AutoTokenizer, TFMobileBertForNextSentencePrediction
1494
+
1495
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
1496
+ >>> model = TFMobileBertForNextSentencePrediction.from_pretrained("google/mobilebert-uncased")
1497
+
1498
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1499
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
1500
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="tf")
1501
+
1502
+ >>> logits = model(encoding["input_ids"], token_type_ids=encoding["token_type_ids"])[0]
1503
+ ```"""
1504
+ outputs = self.mobilebert(
1505
+ input_ids,
1506
+ attention_mask=attention_mask,
1507
+ token_type_ids=token_type_ids,
1508
+ position_ids=position_ids,
1509
+ head_mask=head_mask,
1510
+ inputs_embeds=inputs_embeds,
1511
+ output_attentions=output_attentions,
1512
+ output_hidden_states=output_hidden_states,
1513
+ return_dict=return_dict,
1514
+ training=training,
1515
+ )
1516
+ pooled_output = outputs[1]
1517
+ seq_relationship_scores = self.cls(pooled_output)
1518
+
1519
+ next_sentence_loss = (
1520
+ None
1521
+ if next_sentence_label is None
1522
+ else self.hf_compute_loss(labels=next_sentence_label, logits=seq_relationship_scores)
1523
+ )
1524
+
1525
+ if not return_dict:
1526
+ output = (seq_relationship_scores,) + outputs[2:]
1527
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
1528
+
1529
+ return TFNextSentencePredictorOutput(
1530
+ loss=next_sentence_loss,
1531
+ logits=seq_relationship_scores,
1532
+ hidden_states=outputs.hidden_states,
1533
+ attentions=outputs.attentions,
1534
+ )
1535
+
1536
+ def build(self, input_shape=None):
1537
+ if self.built:
1538
+ return
1539
+ self.built = True
1540
+ if getattr(self, "mobilebert", None) is not None:
1541
+ with tf.name_scope(self.mobilebert.name):
1542
+ self.mobilebert.build(None)
1543
+ if getattr(self, "cls", None) is not None:
1544
+ with tf.name_scope(self.cls.name):
1545
+ self.cls.build(None)
1546
+
1547
+
1548
+ @add_start_docstrings(
1549
+ """
1550
+ MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1551
+ pooled output) e.g. for GLUE tasks.
1552
+ """,
1553
+ MOBILEBERT_START_DOCSTRING,
1554
+ )
1555
+ class TFMobileBertForSequenceClassification(TFMobileBertPreTrainedModel, TFSequenceClassificationLoss):
1556
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1557
+ _keys_to_ignore_on_load_unexpected = [
1558
+ r"predictions___cls",
1559
+ r"seq_relationship___cls",
1560
+ r"cls.predictions",
1561
+ r"cls.seq_relationship",
1562
+ ]
1563
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1564
+
1565
+ def __init__(self, config, *inputs, **kwargs):
1566
+ super().__init__(config, *inputs, **kwargs)
1567
+ self.num_labels = config.num_labels
1568
+
1569
+ self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
1570
+ classifier_dropout = (
1571
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1572
+ )
1573
+ self.dropout = keras.layers.Dropout(classifier_dropout)
1574
+ self.classifier = keras.layers.Dense(
1575
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1576
+ )
1577
+ self.config = config
1578
+
1579
+ @unpack_inputs
1580
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1581
+ @add_code_sample_docstrings(
1582
+ checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION,
1583
+ output_type=TFSequenceClassifierOutput,
1584
+ config_class=_CONFIG_FOR_DOC,
1585
+ expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
1586
+ expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
1587
+ )
1588
+ def call(
1589
+ self,
1590
+ input_ids: TFModelInputType | None = None,
1591
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1592
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1593
+ position_ids: np.ndarray | tf.Tensor | None = None,
1594
+ head_mask: np.ndarray | tf.Tensor | None = None,
1595
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1596
+ output_attentions: Optional[bool] = None,
1597
+ output_hidden_states: Optional[bool] = None,
1598
+ return_dict: Optional[bool] = None,
1599
+ labels: np.ndarray | tf.Tensor | None = None,
1600
+ training: Optional[bool] = False,
1601
+ ) -> Union[Tuple, TFSequenceClassifierOutput]:
1602
+ r"""
1603
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1604
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1605
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
1606
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1607
+ """
1608
+ outputs = self.mobilebert(
1609
+ input_ids,
1610
+ attention_mask=attention_mask,
1611
+ token_type_ids=token_type_ids,
1612
+ position_ids=position_ids,
1613
+ head_mask=head_mask,
1614
+ inputs_embeds=inputs_embeds,
1615
+ output_attentions=output_attentions,
1616
+ output_hidden_states=output_hidden_states,
1617
+ return_dict=return_dict,
1618
+ training=training,
1619
+ )
1620
+ pooled_output = outputs[1]
1621
+
1622
+ pooled_output = self.dropout(pooled_output, training=training)
1623
+ logits = self.classifier(pooled_output)
1624
+
1625
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1626
+
1627
+ if not return_dict:
1628
+ output = (logits,) + outputs[2:]
1629
+ return ((loss,) + output) if loss is not None else output
1630
+
1631
+ return TFSequenceClassifierOutput(
1632
+ loss=loss,
1633
+ logits=logits,
1634
+ hidden_states=outputs.hidden_states,
1635
+ attentions=outputs.attentions,
1636
+ )
1637
+
1638
+ def build(self, input_shape=None):
1639
+ if self.built:
1640
+ return
1641
+ self.built = True
1642
+ if getattr(self, "mobilebert", None) is not None:
1643
+ with tf.name_scope(self.mobilebert.name):
1644
+ self.mobilebert.build(None)
1645
+ if getattr(self, "classifier", None) is not None:
1646
+ with tf.name_scope(self.classifier.name):
1647
+ self.classifier.build([None, None, self.config.hidden_size])
1648
+
1649
+
1650
+ @add_start_docstrings(
1651
+ """
1652
+ MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
1653
+ linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1654
+ """,
1655
+ MOBILEBERT_START_DOCSTRING,
1656
+ )
1657
+ class TFMobileBertForQuestionAnswering(TFMobileBertPreTrainedModel, TFQuestionAnsweringLoss):
1658
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1659
+ _keys_to_ignore_on_load_unexpected = [
1660
+ r"pooler",
1661
+ r"predictions___cls",
1662
+ r"seq_relationship___cls",
1663
+ r"cls.predictions",
1664
+ r"cls.seq_relationship",
1665
+ ]
1666
+
1667
+ def __init__(self, config, *inputs, **kwargs):
1668
+ super().__init__(config, *inputs, **kwargs)
1669
+ self.num_labels = config.num_labels
1670
+
1671
+ self.mobilebert = TFMobileBertMainLayer(config, add_pooling_layer=False, name="mobilebert")
1672
+ self.qa_outputs = keras.layers.Dense(
1673
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1674
+ )
1675
+ self.config = config
1676
+
1677
+ @unpack_inputs
1678
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1679
+ @add_code_sample_docstrings(
1680
+ checkpoint=_CHECKPOINT_FOR_QA,
1681
+ output_type=TFQuestionAnsweringModelOutput,
1682
+ config_class=_CONFIG_FOR_DOC,
1683
+ qa_target_start_index=_QA_TARGET_START_INDEX,
1684
+ qa_target_end_index=_QA_TARGET_END_INDEX,
1685
+ expected_output=_QA_EXPECTED_OUTPUT,
1686
+ expected_loss=_QA_EXPECTED_LOSS,
1687
+ )
1688
+ def call(
1689
+ self,
1690
+ input_ids: TFModelInputType | None = None,
1691
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1692
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1693
+ position_ids: np.ndarray | tf.Tensor | None = None,
1694
+ head_mask: np.ndarray | tf.Tensor | None = None,
1695
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1696
+ output_attentions: Optional[bool] = None,
1697
+ output_hidden_states: Optional[bool] = None,
1698
+ return_dict: Optional[bool] = None,
1699
+ start_positions: np.ndarray | tf.Tensor | None = None,
1700
+ end_positions: np.ndarray | tf.Tensor | None = None,
1701
+ training: Optional[bool] = False,
1702
+ ) -> Union[Tuple, TFQuestionAnsweringModelOutput]:
1703
+ r"""
1704
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1705
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1706
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1707
+ are not taken into account for computing the loss.
1708
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1709
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1710
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1711
+ are not taken into account for computing the loss.
1712
+ """
1713
+ outputs = self.mobilebert(
1714
+ input_ids,
1715
+ attention_mask=attention_mask,
1716
+ token_type_ids=token_type_ids,
1717
+ position_ids=position_ids,
1718
+ head_mask=head_mask,
1719
+ inputs_embeds=inputs_embeds,
1720
+ output_attentions=output_attentions,
1721
+ output_hidden_states=output_hidden_states,
1722
+ return_dict=return_dict,
1723
+ training=training,
1724
+ )
1725
+ sequence_output = outputs[0]
1726
+
1727
+ logits = self.qa_outputs(sequence_output)
1728
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
1729
+ start_logits = tf.squeeze(start_logits, axis=-1)
1730
+ end_logits = tf.squeeze(end_logits, axis=-1)
1731
+
1732
+ loss = None
1733
+ if start_positions is not None and end_positions is not None:
1734
+ labels = {"start_position": start_positions, "end_position": end_positions}
1735
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
1736
+
1737
+ if not return_dict:
1738
+ output = (start_logits, end_logits) + outputs[2:]
1739
+ return ((loss,) + output) if loss is not None else output
1740
+
1741
+ return TFQuestionAnsweringModelOutput(
1742
+ loss=loss,
1743
+ start_logits=start_logits,
1744
+ end_logits=end_logits,
1745
+ hidden_states=outputs.hidden_states,
1746
+ attentions=outputs.attentions,
1747
+ )
1748
+
1749
+ def build(self, input_shape=None):
1750
+ if self.built:
1751
+ return
1752
+ self.built = True
1753
+ if getattr(self, "mobilebert", None) is not None:
1754
+ with tf.name_scope(self.mobilebert.name):
1755
+ self.mobilebert.build(None)
1756
+ if getattr(self, "qa_outputs", None) is not None:
1757
+ with tf.name_scope(self.qa_outputs.name):
1758
+ self.qa_outputs.build([None, None, self.config.hidden_size])
1759
+
1760
+
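As a hedged illustration of how the start/end logits produced above are typically consumed, a minimal extractive-QA sketch follows; the checkpoint name, question, and context are placeholders, and no care is taken here to handle the degenerate case where the predicted end precedes the start.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFMobileBertForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = TFMobileBertForQuestionAnswering.from_pretrained("google/mobilebert-uncased")

question = "Who proposed MobileBERT?"
context = "MobileBERT was proposed by researchers at Google."
inputs = tokenizer(question, context, return_tensors="tf")
outputs = model(**inputs)

# Pick the most likely start and end token positions, then decode that span.
start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
answer_ids = inputs["input_ids"][0, start : end + 1]
print(tokenizer.decode(answer_ids))
```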
1761
+ @add_start_docstrings(
1762
+ """
1763
+ MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
1764
+ a softmax) e.g. for RocStories/SWAG tasks.
1765
+ """,
1766
+ MOBILEBERT_START_DOCSTRING,
1767
+ )
1768
+ class TFMobileBertForMultipleChoice(TFMobileBertPreTrainedModel, TFMultipleChoiceLoss):
1769
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1770
+ _keys_to_ignore_on_load_unexpected = [
1771
+ r"predictions___cls",
1772
+ r"seq_relationship___cls",
1773
+ r"cls.predictions",
1774
+ r"cls.seq_relationship",
1775
+ ]
1776
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1777
+
1778
+ def __init__(self, config, *inputs, **kwargs):
1779
+ super().__init__(config, *inputs, **kwargs)
1780
+
1781
+ self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
1782
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
1783
+ self.classifier = keras.layers.Dense(
1784
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1785
+ )
1786
+ self.config = config
1787
+
1788
+ @unpack_inputs
1789
+ @add_start_docstrings_to_model_forward(
1790
+ MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1791
+ )
1792
+ @add_code_sample_docstrings(
1793
+ checkpoint=_CHECKPOINT_FOR_DOC,
1794
+ output_type=TFMultipleChoiceModelOutput,
1795
+ config_class=_CONFIG_FOR_DOC,
1796
+ )
1797
+ def call(
1798
+ self,
1799
+ input_ids: TFModelInputType | None = None,
1800
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1801
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1802
+ position_ids: np.ndarray | tf.Tensor | None = None,
1803
+ head_mask: np.ndarray | tf.Tensor | None = None,
1804
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1805
+ output_attentions: Optional[bool] = None,
1806
+ output_hidden_states: Optional[bool] = None,
1807
+ return_dict: Optional[bool] = None,
1808
+ labels: np.ndarray | tf.Tensor | None = None,
1809
+ training: Optional[bool] = False,
1810
+ ) -> Union[Tuple, TFMultipleChoiceModelOutput]:
1811
+ r"""
1812
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1813
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]`
1814
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
1815
+ """
1816
+ if input_ids is not None:
1817
+ num_choices = shape_list(input_ids)[1]
1818
+ seq_length = shape_list(input_ids)[2]
1819
+ else:
1820
+ num_choices = shape_list(inputs_embeds)[1]
1821
+ seq_length = shape_list(inputs_embeds)[2]
1822
+
1823
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
1824
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
1825
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
1826
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
1827
+ flat_inputs_embeds = (
1828
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
1829
+ if inputs_embeds is not None
1830
+ else None
1831
+ )
1832
+ outputs = self.mobilebert(
1833
+ flat_input_ids,
1834
+ flat_attention_mask,
1835
+ flat_token_type_ids,
1836
+ flat_position_ids,
1837
+ head_mask,
1838
+ flat_inputs_embeds,
1839
+ output_attentions,
1840
+ output_hidden_states,
1841
+ return_dict=return_dict,
1842
+ training=training,
1843
+ )
1844
+ pooled_output = outputs[1]
1845
+ pooled_output = self.dropout(pooled_output, training=training)
1846
+ logits = self.classifier(pooled_output)
1847
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
1848
+
1849
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
1850
+
1851
+ if not return_dict:
1852
+ output = (reshaped_logits,) + outputs[2:]
1853
+ return ((loss,) + output) if loss is not None else output
1854
+
1855
+ return TFMultipleChoiceModelOutput(
1856
+ loss=loss,
1857
+ logits=reshaped_logits,
1858
+ hidden_states=outputs.hidden_states,
1859
+ attentions=outputs.attentions,
1860
+ )
1861
+
1862
+ def build(self, input_shape=None):
1863
+ if self.built:
1864
+ return
1865
+ self.built = True
1866
+ if getattr(self, "mobilebert", None) is not None:
1867
+ with tf.name_scope(self.mobilebert.name):
1868
+ self.mobilebert.build(None)
1869
+ if getattr(self, "classifier", None) is not None:
1870
+ with tf.name_scope(self.classifier.name):
1871
+ self.classifier.build([None, None, self.config.hidden_size])
1872
+
1873
+
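The call above expects inputs shaped `(batch_size, num_choices, sequence_length)` and flattens them to `(-1, sequence_length)` internally. The hedged sketch below shows one way to build such inputs; the checkpoint, prompt, and candidate continuations are placeholders.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFMobileBertForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = TFMobileBertForMultipleChoice.from_pretrained("google/mobilebert-uncased")

prompt = "The cat sat on"
choices = ["the mat.", "the moon."]
enc = tokenizer([prompt, prompt], choices, return_tensors="tf", padding=True)

# Add the num_choices dimension: (batch_size=1, num_choices=2, sequence_length).
inputs = {k: tf.expand_dims(v, 0) for k, v in enc.items()}
outputs = model(inputs)
best_choice = int(tf.argmax(outputs.logits, axis=-1)[0])
```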
1874
+ @add_start_docstrings(
1875
+ """
1876
+ MobileBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
1877
+ for Named-Entity-Recognition (NER) tasks.
1878
+ """,
1879
+ MOBILEBERT_START_DOCSTRING,
1880
+ )
1881
+ class TFMobileBertForTokenClassification(TFMobileBertPreTrainedModel, TFTokenClassificationLoss):
1882
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1883
+ _keys_to_ignore_on_load_unexpected = [
1884
+ r"pooler",
1885
+ r"predictions___cls",
1886
+ r"seq_relationship___cls",
1887
+ r"cls.predictions",
1888
+ r"cls.seq_relationship",
1889
+ ]
1890
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1891
+
1892
+ def __init__(self, config, *inputs, **kwargs):
1893
+ super().__init__(config, *inputs, **kwargs)
1894
+ self.num_labels = config.num_labels
1895
+
1896
+ self.mobilebert = TFMobileBertMainLayer(config, add_pooling_layer=False, name="mobilebert")
1897
+ classifier_dropout = (
1898
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1899
+ )
1900
+ self.dropout = keras.layers.Dropout(classifier_dropout)
1901
+ self.classifier = keras.layers.Dense(
1902
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1903
+ )
1904
+ self.config = config
1905
+
1906
+ @unpack_inputs
1907
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1908
+ @add_code_sample_docstrings(
1909
+ checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION,
1910
+ output_type=TFTokenClassifierOutput,
1911
+ config_class=_CONFIG_FOR_DOC,
1912
+ expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT,
1913
+ expected_loss=_TOKEN_CLASS_EXPECTED_LOSS,
1914
+ )
1915
+ def call(
1916
+ self,
1917
+ input_ids: TFModelInputType | None = None,
1918
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1919
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1920
+ position_ids: np.ndarray | tf.Tensor | None = None,
1921
+ head_mask: np.ndarray | tf.Tensor | None = None,
1922
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1923
+ output_attentions: Optional[bool] = None,
1924
+ output_hidden_states: Optional[bool] = None,
1925
+ return_dict: Optional[bool] = None,
1926
+ labels: np.ndarray | tf.Tensor | None = None,
1927
+ training: Optional[bool] = False,
1928
+ ) -> Union[Tuple, TFTokenClassifierOutput]:
1929
+ r"""
1930
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1931
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1932
+ """
1933
+ outputs = self.mobilebert(
1934
+ input_ids,
1935
+ attention_mask=attention_mask,
1936
+ token_type_ids=token_type_ids,
1937
+ position_ids=position_ids,
1938
+ head_mask=head_mask,
1939
+ inputs_embeds=inputs_embeds,
1940
+ output_attentions=output_attentions,
1941
+ output_hidden_states=output_hidden_states,
1942
+ return_dict=return_dict,
1943
+ training=training,
1944
+ )
1945
+ sequence_output = outputs[0]
1946
+
1947
+ sequence_output = self.dropout(sequence_output, training=training)
1948
+ logits = self.classifier(sequence_output)
1949
+
1950
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1951
+
1952
+ if not return_dict:
1953
+ output = (logits,) + outputs[2:]
1954
+ return ((loss,) + output) if loss is not None else output
1955
+
1956
+ return TFTokenClassifierOutput(
1957
+ loss=loss,
1958
+ logits=logits,
1959
+ hidden_states=outputs.hidden_states,
1960
+ attentions=outputs.attentions,
1961
+ )
1962
+
1963
+ def build(self, input_shape=None):
1964
+ if self.built:
1965
+ return
1966
+ self.built = True
1967
+ if getattr(self, "mobilebert", None) is not None:
1968
+ with tf.name_scope(self.mobilebert.name):
1969
+ self.mobilebert.build(None)
1970
+ if getattr(self, "classifier", None) is not None:
1971
+ with tf.name_scope(self.classifier.name):
1972
+ self.classifier.build([None, None, self.config.hidden_size])
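To round off the task heads, a hedged sketch of per-token prediction with the token-classification head above; `num_labels=5` and the example sentence are arbitrary choices, and an untuned head will produce essentially random labels.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFMobileBertForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = TFMobileBertForTokenClassification.from_pretrained("google/mobilebert-uncased", num_labels=5)

inputs = tokenizer("Sundar Pichai works at Google.", return_tensors="tf")
logits = model(**inputs).logits                 # (batch, seq_len, num_labels)
predicted_ids = tf.argmax(logits, axis=-1)[0]   # one label id per token

tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0].numpy().tolist())
for token, label_id in zip(tokens, predicted_ids.numpy()):
    print(token, int(label_id))
```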
env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/tokenization_mobilebert.py ADDED
@@ -0,0 +1,518 @@
1
+ # coding=utf-8
2
+ #
3
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """Tokenization classes for MobileBERT."""
17
+
18
+
19
+ import collections
20
+ import os
21
+ import unicodedata
22
+ from typing import List, Optional, Tuple
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
31
+
32
+ PRETRAINED_VOCAB_FILES_MAP = {
33
+ "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"}
34
+ }
35
+
36
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}
37
+
38
+
39
+ PRETRAINED_INIT_CONFIGURATION = {}
40
+
41
+
42
+ # Copied from transformers.models.bert.tokenization_bert.load_vocab
43
+ def load_vocab(vocab_file):
44
+ """Loads a vocabulary file into a dictionary."""
45
+ vocab = collections.OrderedDict()
46
+ with open(vocab_file, "r", encoding="utf-8") as reader:
47
+ tokens = reader.readlines()
48
+ for index, token in enumerate(tokens):
49
+ token = token.rstrip("\n")
50
+ vocab[token] = index
51
+ return vocab
52
+
53
+
54
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
55
+ def whitespace_tokenize(text):
56
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
57
+ text = text.strip()
58
+ if not text:
59
+ return []
60
+ tokens = text.split()
61
+ return tokens
62
+
63
+
64
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer with BERT->MobileBERT,Bert->MobileBert
65
+ class MobileBertTokenizer(PreTrainedTokenizer):
66
+ r"""
67
+ Construct a MobileBERT tokenizer. Based on WordPiece.
68
+
69
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
70
+ this superclass for more information regarding those methods.
71
+
72
+ Args:
73
+ vocab_file (`str`):
74
+ File containing the vocabulary.
75
+ do_lower_case (`bool`, *optional*, defaults to `True`):
76
+ Whether or not to lowercase the input when tokenizing.
77
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
78
+ Whether or not to do basic tokenization before WordPiece.
79
+ never_split (`Iterable`, *optional*):
80
+ Collection of tokens which will never be split during tokenization. Only has an effect when
81
+ `do_basic_tokenize=True`
82
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
83
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
84
+ token instead.
85
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
86
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
87
+ sequence classification or for a text and a question for question answering. It is also used as the last
88
+ token of a sequence built with special tokens.
89
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
90
+ The token used for padding, for example when batching sequences of different lengths.
91
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
92
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
93
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
94
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
95
+ The token used for masking values. This is the token used when training this model with masked language
96
+ modeling. This is the token which the model will try to predict.
97
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
98
+ Whether or not to tokenize Chinese characters.
99
+
100
+ This should likely be deactivated for Japanese (see this
101
+ [issue](https://github.com/huggingface/transformers/issues/328)).
102
+ strip_accents (`bool`, *optional*):
103
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
104
+ value for `lowercase` (as in the original MobileBERT).
105
+ """
106
+
107
+ vocab_files_names = VOCAB_FILES_NAMES
108
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
109
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
110
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
111
+
112
+ def __init__(
113
+ self,
114
+ vocab_file,
115
+ do_lower_case=True,
116
+ do_basic_tokenize=True,
117
+ never_split=None,
118
+ unk_token="[UNK]",
119
+ sep_token="[SEP]",
120
+ pad_token="[PAD]",
121
+ cls_token="[CLS]",
122
+ mask_token="[MASK]",
123
+ tokenize_chinese_chars=True,
124
+ strip_accents=None,
125
+ **kwargs,
126
+ ):
127
+ if not os.path.isfile(vocab_file):
128
+ raise ValueError(
129
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
130
+ " model use `tokenizer = MobileBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
131
+ )
132
+ self.vocab = load_vocab(vocab_file)
133
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
134
+ self.do_basic_tokenize = do_basic_tokenize
135
+ if do_basic_tokenize:
136
+ self.basic_tokenizer = BasicTokenizer(
137
+ do_lower_case=do_lower_case,
138
+ never_split=never_split,
139
+ tokenize_chinese_chars=tokenize_chinese_chars,
140
+ strip_accents=strip_accents,
141
+ )
142
+
143
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
144
+
145
+ super().__init__(
146
+ do_lower_case=do_lower_case,
147
+ do_basic_tokenize=do_basic_tokenize,
148
+ never_split=never_split,
149
+ unk_token=unk_token,
150
+ sep_token=sep_token,
151
+ pad_token=pad_token,
152
+ cls_token=cls_token,
153
+ mask_token=mask_token,
154
+ tokenize_chinese_chars=tokenize_chinese_chars,
155
+ strip_accents=strip_accents,
156
+ **kwargs,
157
+ )
158
+
159
+ @property
160
+ def do_lower_case(self):
161
+ return self.basic_tokenizer.do_lower_case
162
+
163
+ @property
164
+ def vocab_size(self):
165
+ return len(self.vocab)
166
+
167
+ def get_vocab(self):
168
+ return dict(self.vocab, **self.added_tokens_encoder)
169
+
170
+ def _tokenize(self, text, split_special_tokens=False):
171
+ split_tokens = []
172
+ if self.do_basic_tokenize:
173
+ for token in self.basic_tokenizer.tokenize(
174
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
175
+ ):
176
+ # If the token is part of the never_split set
177
+ if token in self.basic_tokenizer.never_split:
178
+ split_tokens.append(token)
179
+ else:
180
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
181
+ else:
182
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
183
+ return split_tokens
184
+
185
+ def _convert_token_to_id(self, token):
186
+ """Converts a token (str) in an id using the vocab."""
187
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
188
+
189
+ def _convert_id_to_token(self, index):
190
+ """Converts an index (integer) in a token (str) using the vocab."""
191
+ return self.ids_to_tokens.get(index, self.unk_token)
192
+
193
+ def convert_tokens_to_string(self, tokens):
194
+ """Converts a sequence of tokens (string) in a single string."""
195
+ out_string = " ".join(tokens).replace(" ##", "").strip()
196
+ return out_string
197
+
198
+ def build_inputs_with_special_tokens(
199
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
200
+ ) -> List[int]:
201
+ """
202
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
203
+ adding special tokens. A MobileBERT sequence has the following format:
204
+
205
+ - single sequence: `[CLS] X [SEP]`
206
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
207
+
208
+ Args:
209
+ token_ids_0 (`List[int]`):
210
+ List of IDs to which the special tokens will be added.
211
+ token_ids_1 (`List[int]`, *optional*):
212
+ Optional second list of IDs for sequence pairs.
213
+
214
+ Returns:
215
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
216
+ """
217
+ if token_ids_1 is None:
218
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
219
+ cls = [self.cls_token_id]
220
+ sep = [self.sep_token_id]
221
+ return cls + token_ids_0 + sep + token_ids_1 + sep
222
+
223
+ def get_special_tokens_mask(
224
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
225
+ ) -> List[int]:
226
+ """
227
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
228
+ special tokens using the tokenizer `prepare_for_model` method.
229
+
230
+ Args:
231
+ token_ids_0 (`List[int]`):
232
+ List of IDs.
233
+ token_ids_1 (`List[int]`, *optional*):
234
+ Optional second list of IDs for sequence pairs.
235
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
236
+ Whether or not the token list is already formatted with special tokens for the model.
237
+
238
+ Returns:
239
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
240
+ """
241
+
242
+ if already_has_special_tokens:
243
+ return super().get_special_tokens_mask(
244
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
245
+ )
246
+
247
+ if token_ids_1 is not None:
248
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
249
+ return [1] + ([0] * len(token_ids_0)) + [1]
250
+
251
+ def create_token_type_ids_from_sequences(
252
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
253
+ ) -> List[int]:
254
+ """
255
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A MobileBERT sequence
256
+ pair mask has the following format:
257
+
258
+ ```
259
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
260
+ | first sequence | second sequence |
261
+ ```
262
+
263
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
264
+
265
+ Args:
266
+ token_ids_0 (`List[int]`):
267
+ List of IDs.
268
+ token_ids_1 (`List[int]`, *optional*):
269
+ Optional second list of IDs for sequence pairs.
270
+
271
+ Returns:
272
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
273
+ """
274
+ sep = [self.sep_token_id]
275
+ cls = [self.cls_token_id]
276
+ if token_ids_1 is None:
277
+ return len(cls + token_ids_0 + sep) * [0]
278
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
279
+
280
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
281
+ index = 0
282
+ if os.path.isdir(save_directory):
283
+ vocab_file = os.path.join(
284
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
285
+ )
286
+ else:
287
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
288
+ with open(vocab_file, "w", encoding="utf-8") as writer:
289
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
290
+ if index != token_index:
291
+ logger.warning(
292
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
293
+ " Please check that the vocabulary is not corrupted!"
294
+ )
295
+ index = token_index
296
+ writer.write(token + "\n")
297
+ index += 1
298
+ return (vocab_file,)
299
+
300
+
301
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
302
+ class BasicTokenizer(object):
303
+ """
304
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
305
+
306
+ Args:
307
+ do_lower_case (`bool`, *optional*, defaults to `True`):
308
+ Whether or not to lowercase the input when tokenizing.
309
+ never_split (`Iterable`, *optional*):
310
+ Collection of tokens which will never be split during tokenization. Only has an effect when
311
+ `do_basic_tokenize=True`
312
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
313
+ Whether or not to tokenize Chinese characters.
314
+
315
+ This should likely be deactivated for Japanese (see this
316
+ [issue](https://github.com/huggingface/transformers/issues/328)).
317
+ strip_accents (`bool`, *optional*):
318
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
319
+ value for `lowercase` (as in the original BERT).
320
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
321
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
322
+ the full context of the words, such as contractions.
323
+ """
324
+
325
+ def __init__(
326
+ self,
327
+ do_lower_case=True,
328
+ never_split=None,
329
+ tokenize_chinese_chars=True,
330
+ strip_accents=None,
331
+ do_split_on_punc=True,
332
+ ):
333
+ if never_split is None:
334
+ never_split = []
335
+ self.do_lower_case = do_lower_case
336
+ self.never_split = set(never_split)
337
+ self.tokenize_chinese_chars = tokenize_chinese_chars
338
+ self.strip_accents = strip_accents
339
+ self.do_split_on_punc = do_split_on_punc
340
+
341
+ def tokenize(self, text, never_split=None):
342
+ """
343
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
344
+
345
+ Args:
346
+ never_split (`List[str]`, *optional*):
347
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
348
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
349
+ """
350
+ # union() returns a new set by concatenating the two sets.
351
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
352
+ text = self._clean_text(text)
353
+
354
+ # This was added on November 1st, 2018 for the multilingual and Chinese
355
+ # models. This is also applied to the English models now, but it doesn't
356
+ # matter since the English models were not trained on any Chinese data
357
+ # and generally don't have any Chinese data in them (there are Chinese
358
+ # characters in the vocabulary because Wikipedia does have some Chinese
359
+ # words in the English Wikipedia.).
360
+ if self.tokenize_chinese_chars:
361
+ text = self._tokenize_chinese_chars(text)
362
+ # prevents treating the same character with different unicode codepoints as different characters
363
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
364
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
365
+ split_tokens = []
366
+ for token in orig_tokens:
367
+ if token not in never_split:
368
+ if self.do_lower_case:
369
+ token = token.lower()
370
+ if self.strip_accents is not False:
371
+ token = self._run_strip_accents(token)
372
+ elif self.strip_accents:
373
+ token = self._run_strip_accents(token)
374
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
375
+
376
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
377
+ return output_tokens
378
+
379
+ def _run_strip_accents(self, text):
380
+ """Strips accents from a piece of text."""
381
+ text = unicodedata.normalize("NFD", text)
382
+ output = []
383
+ for char in text:
384
+ cat = unicodedata.category(char)
385
+ if cat == "Mn":
386
+ continue
387
+ output.append(char)
388
+ return "".join(output)
389
+
390
+ def _run_split_on_punc(self, text, never_split=None):
391
+ """Splits punctuation on a piece of text."""
392
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
393
+ return [text]
394
+ chars = list(text)
395
+ i = 0
396
+ start_new_word = True
397
+ output = []
398
+ while i < len(chars):
399
+ char = chars[i]
400
+ if _is_punctuation(char):
401
+ output.append([char])
402
+ start_new_word = True
403
+ else:
404
+ if start_new_word:
405
+ output.append([])
406
+ start_new_word = False
407
+ output[-1].append(char)
408
+ i += 1
409
+
410
+ return ["".join(x) for x in output]
411
+
412
+ def _tokenize_chinese_chars(self, text):
413
+ """Adds whitespace around any CJK character."""
414
+ output = []
415
+ for char in text:
416
+ cp = ord(char)
417
+ if self._is_chinese_char(cp):
418
+ output.append(" ")
419
+ output.append(char)
420
+ output.append(" ")
421
+ else:
422
+ output.append(char)
423
+ return "".join(output)
424
+
425
+ def _is_chinese_char(self, cp):
426
+ """Checks whether CP is the codepoint of a CJK character."""
427
+ # This defines a "chinese character" as anything in the CJK Unicode block:
428
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
429
+ #
430
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
431
+ # despite its name. The modern Korean Hangul alphabet is a different block,
432
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
433
+ # space-separated words, so they are not treated specially and handled
434
+ # like all of the other languages.
435
+ if (
436
+ (cp >= 0x4E00 and cp <= 0x9FFF)
437
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
438
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
439
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
440
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
441
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
442
+ or (cp >= 0xF900 and cp <= 0xFAFF)
443
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
444
+ ): #
445
+ return True
446
+
447
+ return False
448
+
449
+ def _clean_text(self, text):
450
+ """Performs invalid character removal and whitespace cleanup on text."""
451
+ output = []
452
+ for char in text:
453
+ cp = ord(char)
454
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
455
+ continue
456
+ if _is_whitespace(char):
457
+ output.append(" ")
458
+ else:
459
+ output.append(char)
460
+ return "".join(output)
461
+
462
+
463
+ # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
464
+ class WordpieceTokenizer(object):
465
+ """Runs WordPiece tokenization."""
466
+
467
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
468
+ self.vocab = vocab
469
+ self.unk_token = unk_token
470
+ self.max_input_chars_per_word = max_input_chars_per_word
471
+
472
+ def tokenize(self, text):
473
+ """
474
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
475
+ tokenization using the given vocabulary.
476
+
477
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
478
+
479
+ Args:
480
+ text: A single token or whitespace separated tokens. This should have
481
+ already been passed through *BasicTokenizer*.
482
+
483
+ Returns:
484
+ A list of wordpiece tokens.
485
+ """
486
+
487
+ output_tokens = []
488
+ for token in whitespace_tokenize(text):
489
+ chars = list(token)
490
+ if len(chars) > self.max_input_chars_per_word:
491
+ output_tokens.append(self.unk_token)
492
+ continue
493
+
494
+ is_bad = False
495
+ start = 0
496
+ sub_tokens = []
497
+ while start < len(chars):
498
+ end = len(chars)
499
+ cur_substr = None
500
+ while start < end:
501
+ substr = "".join(chars[start:end])
502
+ if start > 0:
503
+ substr = "##" + substr
504
+ if substr in self.vocab:
505
+ cur_substr = substr
506
+ break
507
+ end -= 1
508
+ if cur_substr is None:
509
+ is_bad = True
510
+ break
511
+ sub_tokens.append(cur_substr)
512
+ start = end
513
+
514
+ if is_bad:
515
+ output_tokens.append(self.unk_token)
516
+ else:
517
+ output_tokens.extend(sub_tokens)
518
+ return output_tokens
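Since `WordpieceTokenizer.tokenize` is the heart of this file, here is a self-contained rehearsal of the same greedy longest-match-first loop against a toy vocabulary; the vocabulary contents and helper name are made up purely for illustration.

```python
# Toy vocabulary; real checkpoints ship a vocab.txt with ~30k entries.
toy_vocab = {"un", "##aff", "##able", "aff", "able"}

def wordpiece(token, vocab, unk_token="[UNK]"):
    pieces, start = [], 0
    chars = list(token)
    while start < len(chars):
        end = len(chars)
        cur = None
        # Try the longest remaining substring first, shrinking from the right.
        while start < end:
            sub = "".join(chars[start:end])
            if start > 0:
                sub = "##" + sub        # continuation pieces carry the "##" prefix
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:
            return [unk_token]          # no prefix matched: the whole word is unknown
        pieces.append(cur)
        start = end
    return pieces

print(wordpiece("unaffable", toy_vocab))  # ['un', '##aff', '##able']
```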
env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/tokenization_mobilebert_fast.py ADDED
@@ -0,0 +1,189 @@
1
+ # coding=utf-8
2
+ #
3
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """Tokenization classes for MobileBERT."""
17
+
18
+ import json
19
+ from typing import List, Optional, Tuple
20
+
21
+ from tokenizers import normalizers
22
+
23
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
24
+ from ...utils import logging
25
+ from .tokenization_mobilebert import MobileBertTokenizer
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
31
+
32
+ PRETRAINED_VOCAB_FILES_MAP = {
33
+ "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
34
+ "tokenizer_file": {
35
+ "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
36
+ },
37
+ }
38
+
39
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}
40
+
41
+
42
+ PRETRAINED_INIT_CONFIGURATION = {}
43
+
44
+
45
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with BERT->MobileBERT,Bert->MobileBert
46
+ class MobileBertTokenizerFast(PreTrainedTokenizerFast):
47
+ r"""
48
+ Construct a "fast" MobileBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
49
+
50
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
51
+ refer to this superclass for more information regarding those methods.
52
+
53
+ Args:
54
+ vocab_file (`str`):
55
+ File containing the vocabulary.
56
+ do_lower_case (`bool`, *optional*, defaults to `True`):
57
+ Whether or not to lowercase the input when tokenizing.
58
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
59
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
60
+ token instead.
61
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
62
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
63
+ sequence classification or for a text and a question for question answering. It is also used as the last
64
+ token of a sequence built with special tokens.
65
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
66
+ The token used for padding, for example when batching sequences of different lengths.
67
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
68
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
69
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
70
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
71
+ The token used for masking values. This is the token used when training this model with masked language
72
+ modeling. This is the token which the model will try to predict.
73
+ clean_text (`bool`, *optional*, defaults to `True`):
74
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
75
+ whitespace characters with a standard space.
76
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
77
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
78
+ issue](https://github.com/huggingface/transformers/issues/328)).
79
+ strip_accents (`bool`, *optional*):
80
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
81
+ value for `lowercase` (as in the original MobileBERT).
82
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
83
+ The prefix for subwords.
84
+ """
85
+
86
+ vocab_files_names = VOCAB_FILES_NAMES
87
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
88
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
89
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
90
+ slow_tokenizer_class = MobileBertTokenizer
91
+
92
+ def __init__(
93
+ self,
94
+ vocab_file=None,
95
+ tokenizer_file=None,
96
+ do_lower_case=True,
97
+ unk_token="[UNK]",
98
+ sep_token="[SEP]",
99
+ pad_token="[PAD]",
100
+ cls_token="[CLS]",
101
+ mask_token="[MASK]",
102
+ tokenize_chinese_chars=True,
103
+ strip_accents=None,
104
+ **kwargs,
105
+ ):
106
+ super().__init__(
107
+ vocab_file,
108
+ tokenizer_file=tokenizer_file,
109
+ do_lower_case=do_lower_case,
110
+ unk_token=unk_token,
111
+ sep_token=sep_token,
112
+ pad_token=pad_token,
113
+ cls_token=cls_token,
114
+ mask_token=mask_token,
115
+ tokenize_chinese_chars=tokenize_chinese_chars,
116
+ strip_accents=strip_accents,
117
+ **kwargs,
118
+ )
119
+
120
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
121
+ if (
122
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
123
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
124
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
125
+ ):
126
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
127
+ normalizer_state["lowercase"] = do_lower_case
128
+ normalizer_state["strip_accents"] = strip_accents
129
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
130
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
131
+
132
+ self.do_lower_case = do_lower_case
133
+
134
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
135
+ """
136
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
137
+ adding special tokens. A MobileBERT sequence has the following format:
138
+
139
+ - single sequence: `[CLS] X [SEP]`
140
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
141
+
142
+ Args:
143
+ token_ids_0 (`List[int]`):
144
+ List of IDs to which the special tokens will be added.
145
+ token_ids_1 (`List[int]`, *optional*):
146
+ Optional second list of IDs for sequence pairs.
147
+
148
+ Returns:
149
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
150
+ """
151
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
152
+
153
+ if token_ids_1 is not None:
154
+ output += token_ids_1 + [self.sep_token_id]
155
+
156
+ return output
157
+
158
+ def create_token_type_ids_from_sequences(
159
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
160
+ ) -> List[int]:
161
+ """
162
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A MobileBERT sequence
163
+ pair mask has the following format:
164
+
165
+ ```
166
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
167
+ | first sequence | second sequence |
168
+ ```
169
+
170
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
171
+
172
+ Args:
173
+ token_ids_0 (`List[int]`):
174
+ List of IDs.
175
+ token_ids_1 (`List[int]`, *optional*):
176
+ Optional second list of IDs for sequence pairs.
177
+
178
+ Returns:
179
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
180
+ """
181
+ sep = [self.sep_token_id]
182
+ cls = [self.cls_token_id]
183
+ if token_ids_1 is None:
184
+ return len(cls + token_ids_0 + sep) * [0]
185
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
186
+
187
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
188
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
189
+ return tuple(files)
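A short, hedged usage sketch of the two helper methods defined above; the checkpoint name and input texts are illustrative, and the assertion only checks that the special-token layout `[CLS] A [SEP] B [SEP]` stays aligned with the segment-id mask.

```python
from transformers import MobileBertTokenizerFast

tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")

ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("hello world"))
ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("how are you"))

with_special = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
segment_ids = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)

# Sequence A (with its [CLS] and [SEP]) is marked 0, sequence B (and its [SEP]) is marked 1.
assert len(with_special) == len(segment_ids)
```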
env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__init__.py ADDED
@@ -0,0 +1,96 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_tokenizers_available,
20
+ is_torch_available,
21
+ is_vision_available,
22
+ )
23
+
24
+
25
+ _import_structure = {
26
+ "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
27
+ "tokenization_perceiver": ["PerceiverTokenizer"],
28
+ }
29
+
30
+ try:
31
+ if not is_vision_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
37
+ _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
38
+
39
+ try:
40
+ if not is_torch_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["modeling_perceiver"] = [
46
+ "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
47
+ "PerceiverForImageClassificationConvProcessing",
48
+ "PerceiverForImageClassificationFourier",
49
+ "PerceiverForImageClassificationLearned",
50
+ "PerceiverForMaskedLM",
51
+ "PerceiverForMultimodalAutoencoding",
52
+ "PerceiverForOpticalFlow",
53
+ "PerceiverForSequenceClassification",
54
+ "PerceiverLayer",
55
+ "PerceiverModel",
56
+ "PerceiverPreTrainedModel",
57
+ ]
58
+
59
+
60
+ if TYPE_CHECKING:
61
+ from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
62
+ from .tokenization_perceiver import PerceiverTokenizer
63
+
64
+ try:
65
+ if not is_vision_available():
66
+ raise OptionalDependencyNotAvailable()
67
+ except OptionalDependencyNotAvailable:
68
+ pass
69
+ else:
70
+ from .feature_extraction_perceiver import PerceiverFeatureExtractor
71
+ from .image_processing_perceiver import PerceiverImageProcessor
72
+
73
+ try:
74
+ if not is_torch_available():
75
+ raise OptionalDependencyNotAvailable()
76
+ except OptionalDependencyNotAvailable:
77
+ pass
78
+ else:
79
+ from .modeling_perceiver import (
80
+ PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
81
+ PerceiverForImageClassificationConvProcessing,
82
+ PerceiverForImageClassificationFourier,
83
+ PerceiverForImageClassificationLearned,
84
+ PerceiverForMaskedLM,
85
+ PerceiverForMultimodalAutoencoding,
86
+ PerceiverForOpticalFlow,
87
+ PerceiverForSequenceClassification,
88
+ PerceiverLayer,
89
+ PerceiverModel,
90
+ PerceiverPreTrainedModel,
91
+ )
92
+
93
+ else:
94
+ import sys
95
+
96
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
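As a hedged usage note on the lazy structure above: importing the package itself is cheap, and a concrete submodule such as `configuration_perceiver` is only imported when one of its attributes is first accessed. A minimal illustration, assuming `transformers` is installed:

```python
from transformers.models import perceiver

# Attribute access is what triggers the lazy import of the underlying submodule.
config = perceiver.PerceiverConfig()
print(type(config).__module__)  # transformers.models.perceiver.configuration_perceiver
```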
env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/configuration_perceiver.cpython-310.pyc ADDED
Binary file (9.93 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/convert_perceiver_haiku_to_pytorch.cpython-310.pyc ADDED
Binary file (12.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/feature_extraction_perceiver.cpython-310.pyc ADDED
Binary file (1.04 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/image_processing_perceiver.cpython-310.pyc ADDED
Binary file (14.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc ADDED
Binary file (105 kB). View file